Diffstat
-rw-r--r-- services/sync/SyncComponents.manifest | 3
-rw-r--r-- services/sync/Weave.sys.mjs | 190
-rw-r--r-- services/sync/components.conf | 20
-rw-r--r-- services/sync/docs/engines.rst | 133
-rw-r--r-- services/sync/docs/external.rst | 8
-rw-r--r-- services/sync/docs/index.rst | 17
-rw-r--r-- services/sync/docs/overview.rst | 81
-rw-r--r-- services/sync/docs/payload-evolution.md | 168
-rw-r--r-- services/sync/docs/rust-engines.rst | 37
-rw-r--r-- services/sync/golden_gate/Cargo.toml | 25
-rw-r--r-- services/sync/golden_gate/src/error.rs | 71
-rw-r--r-- services/sync/golden_gate/src/ferry.rs | 74
-rw-r--r-- services/sync/golden_gate/src/lib.rs | 119
-rw-r--r-- services/sync/golden_gate/src/log.rs | 161
-rw-r--r-- services/sync/golden_gate/src/task.rs | 355
-rw-r--r-- services/sync/modules-testing/fakeservices.sys.mjs | 114
-rw-r--r-- services/sync/modules-testing/fxa_utils.sys.mjs | 55
-rw-r--r-- services/sync/modules-testing/rotaryengine.sys.mjs | 120
-rw-r--r-- services/sync/modules-testing/utils.sys.mjs | 319
-rw-r--r-- services/sync/modules/SyncDisconnect.sys.mjs | 235
-rw-r--r-- services/sync/modules/SyncedTabs.sys.mjs | 348
-rw-r--r-- services/sync/modules/UIState.sys.mjs | 285
-rw-r--r-- services/sync/modules/addonsreconciler.sys.mjs | 584
-rw-r--r-- services/sync/modules/addonutils.sys.mjs | 391
-rw-r--r-- services/sync/modules/bridged_engine.sys.mjs | 499
-rw-r--r-- services/sync/modules/collection_validator.sys.mjs | 267
-rw-r--r-- services/sync/modules/constants.sys.mjs | 133
-rw-r--r-- services/sync/modules/doctor.sys.mjs | 201
-rw-r--r-- services/sync/modules/engines.sys.mjs | 2274
-rw-r--r-- services/sync/modules/engines/addons.sys.mjs | 818
-rw-r--r-- services/sync/modules/engines/bookmarks.sys.mjs | 950
-rw-r--r-- services/sync/modules/engines/clients.sys.mjs | 1122
-rw-r--r-- services/sync/modules/engines/extension-storage.sys.mjs | 308
-rw-r--r-- services/sync/modules/engines/forms.sys.mjs | 298
-rw-r--r-- services/sync/modules/engines/history.sys.mjs | 654
-rw-r--r-- services/sync/modules/engines/passwords.sys.mjs | 546
-rw-r--r-- services/sync/modules/engines/prefs.sys.mjs | 503
-rw-r--r-- services/sync/modules/engines/tabs.sys.mjs | 625
-rw-r--r-- services/sync/modules/keys.sys.mjs | 166
-rw-r--r-- services/sync/modules/main.sys.mjs | 23
-rw-r--r-- services/sync/modules/policies.sys.mjs | 1055
-rw-r--r-- services/sync/modules/record.sys.mjs | 1335
-rw-r--r-- services/sync/modules/resource.sys.mjs | 292
-rw-r--r-- services/sync/modules/service.sys.mjs | 1643
-rw-r--r-- services/sync/modules/stages/declined.sys.mjs | 78
-rw-r--r-- services/sync/modules/stages/enginesync.sys.mjs | 412
-rw-r--r-- services/sync/modules/status.sys.mjs | 135
-rw-r--r-- services/sync/modules/sync_auth.sys.mjs | 655
-rw-r--r-- services/sync/modules/telemetry.sys.mjs | 1279
-rw-r--r-- services/sync/modules/util.sys.mjs | 780
-rw-r--r-- services/sync/moz.build | 72
-rw-r--r-- services/sync/tests/tps/.eslintrc.js | 28
-rw-r--r-- services/sync/tests/tps/addons/api/restartless-xpi@tests.mozilla.org.json | 21
-rw-r--r-- services/sync/tests/tps/addons/api/test-webext@quality.mozilla.org.json | 21
-rw-r--r-- services/sync/tests/tps/addons/restartless.xpi | bin 0 -> 485 bytes
-rw-r--r-- services/sync/tests/tps/addons/webextension.xpi | bin 0 -> 3412 bytes
-rw-r--r-- services/sync/tests/tps/all_tests.json | 34
-rw-r--r-- services/sync/tests/tps/test_addon_reconciling.js | 45
-rw-r--r-- services/sync/tests/tps/test_addon_restartless_xpi.js | 64
-rw-r--r-- services/sync/tests/tps/test_addon_webext_xpi.js | 65
-rw-r--r-- services/sync/tests/tps/test_addon_wipe.js | 31
-rw-r--r-- services/sync/tests/tps/test_addresses.js | 84
-rw-r--r-- services/sync/tests/tps/test_bookmark_conflict.js | 138
-rw-r--r-- services/sync/tests/tps/test_bookmarks_in_same_named_folder.js | 53
-rw-r--r-- services/sync/tests/tps/test_bug501528.js | 75
-rw-r--r-- services/sync/tests/tps/test_bug530717.js | 47
-rw-r--r-- services/sync/tests/tps/test_bug531489.js | 43
-rw-r--r-- services/sync/tests/tps/test_bug535326.js | 148
-rw-r--r-- services/sync/tests/tps/test_bug538298.js | 78
-rw-r--r-- services/sync/tests/tps/test_bug546807.js | 38
-rw-r--r-- services/sync/tests/tps/test_bug556509.js | 32
-rw-r--r-- services/sync/tests/tps/test_bug562515.js | 90
-rw-r--r-- services/sync/tests/tps/test_bug575423.js | 67
-rw-r--r-- services/sync/tests/tps/test_client_wipe.js | 142
-rw-r--r-- services/sync/tests/tps/test_creditcards.js | 62
-rw-r--r-- services/sync/tests/tps/test_existing_bookmarks.js | 80
-rw-r--r-- services/sync/tests/tps/test_extstorage.js | 154
-rw-r--r-- services/sync/tests/tps/test_formdata.js | 63
-rw-r--r-- services/sync/tests/tps/test_history.js | 129
-rw-r--r-- services/sync/tests/tps/test_history_collision.js | 98
-rw-r--r-- services/sync/tests/tps/test_passwords.js | 119
-rw-r--r-- services/sync/tests/tps/test_prefs.js | 35
-rw-r--r-- services/sync/tests/tps/test_privbrw_passwords.js | 105
-rw-r--r-- services/sync/tests/tps/test_privbrw_tabs.js | 86
-rw-r--r-- services/sync/tests/tps/test_special_tabs.js | 63
-rw-r--r-- services/sync/tests/tps/test_sync.js | 403
-rw-r--r-- services/sync/tests/tps/test_tabs.js | 42
-rw-r--r-- services/sync/tests/unit/addon1-search.json | 21
-rw-r--r-- services/sync/tests/unit/bootstrap1-search.json | 21
-rw-r--r-- services/sync/tests/unit/head_appinfo.js | 58
-rw-r--r-- services/sync/tests/unit/head_errorhandler_common.js | 195
-rw-r--r-- services/sync/tests/unit/head_helpers.js | 709
-rw-r--r-- services/sync/tests/unit/head_http_server.js | 1265
-rw-r--r-- services/sync/tests/unit/missing-sourceuri.json | 20
-rw-r--r-- services/sync/tests/unit/missing-xpi-search.json | 21
-rw-r--r-- services/sync/tests/unit/prefs_test_prefs_store.js | 47
-rw-r--r-- services/sync/tests/unit/rewrite-search.json | 21
-rw-r--r-- services/sync/tests/unit/sync_ping_schema.json | 262
-rw-r--r-- services/sync/tests/unit/systemaddon-search.json | 21
-rw-r--r-- services/sync/tests/unit/test_412.js | 60
-rw-r--r-- services/sync/tests/unit/test_addon_utils.js | 156
-rw-r--r-- services/sync/tests/unit/test_addons_engine.js | 277
-rw-r--r-- services/sync/tests/unit/test_addons_reconciler.js | 209
-rw-r--r-- services/sync/tests/unit/test_addons_store.js | 750
-rw-r--r-- services/sync/tests/unit/test_addons_tracker.js | 174
-rw-r--r-- services/sync/tests/unit/test_addons_validator.js | 65
-rw-r--r-- services/sync/tests/unit/test_bookmark_batch_fail.js | 25
-rw-r--r-- services/sync/tests/unit/test_bookmark_decline_undecline.js | 48
-rw-r--r-- services/sync/tests/unit/test_bookmark_engine.js | 1555
-rw-r--r-- services/sync/tests/unit/test_bookmark_order.js | 586
-rw-r--r-- services/sync/tests/unit/test_bookmark_places_query_rewriting.js | 57
-rw-r--r-- services/sync/tests/unit/test_bookmark_record.js | 64
-rw-r--r-- services/sync/tests/unit/test_bookmark_store.js | 425
-rw-r--r-- services/sync/tests/unit/test_bookmark_tracker.js | 1275
-rw-r--r-- services/sync/tests/unit/test_bridged_engine.js | 248
-rw-r--r-- services/sync/tests/unit/test_clients_engine.js | 2108
-rw-r--r-- services/sync/tests/unit/test_clients_escape.js | 57
-rw-r--r-- services/sync/tests/unit/test_collection_getBatched.js | 187
-rw-r--r-- services/sync/tests/unit/test_collections_recovery.js | 95
-rw-r--r-- services/sync/tests/unit/test_corrupt_keys.js | 248
-rw-r--r-- services/sync/tests/unit/test_declined.js | 194
-rw-r--r-- services/sync/tests/unit/test_disconnect_shutdown.js | 101
-rw-r--r-- services/sync/tests/unit/test_engine.js | 246
-rw-r--r-- services/sync/tests/unit/test_engine_abort.js | 79
-rw-r--r-- services/sync/tests/unit/test_engine_changes_during_sync.js | 611
-rw-r--r-- services/sync/tests/unit/test_enginemanager.js | 232
-rw-r--r-- services/sync/tests/unit/test_errorhandler_1.js | 341
-rw-r--r-- services/sync/tests/unit/test_errorhandler_2.js | 550
-rw-r--r-- services/sync/tests/unit/test_errorhandler_filelog.js | 473
-rw-r--r-- services/sync/tests/unit/test_errorhandler_sync_checkServerError.js | 294
-rw-r--r-- services/sync/tests/unit/test_extension_storage_engine.js | 275
-rw-r--r-- services/sync/tests/unit/test_extension_storage_engine_kinto.js | 136
-rw-r--r-- services/sync/tests/unit/test_extension_storage_migration_telem.js | 81
-rw-r--r-- services/sync/tests/unit/test_extension_storage_tracker_kinto.js | 44
-rw-r--r-- services/sync/tests/unit/test_form_validator.js | 86
-rw-r--r-- services/sync/tests/unit/test_forms_store.js | 176
-rw-r--r-- services/sync/tests/unit/test_forms_tracker.js | 78
-rw-r--r-- services/sync/tests/unit/test_fxa_node_reassignment.js | 399
-rw-r--r-- services/sync/tests/unit/test_fxa_service_cluster.js | 58
-rw-r--r-- services/sync/tests/unit/test_history_engine.js | 429
-rw-r--r-- services/sync/tests/unit/test_history_store.js | 570
-rw-r--r-- services/sync/tests/unit/test_history_tracker.js | 251
-rw-r--r-- services/sync/tests/unit/test_hmac_error.js | 250
-rw-r--r-- services/sync/tests/unit/test_httpd_sync_server.js | 250
-rw-r--r-- services/sync/tests/unit/test_interval_triggers.js | 472
-rw-r--r-- services/sync/tests/unit/test_keys.js | 242
-rw-r--r-- services/sync/tests/unit/test_load_modules.js | 59
-rw-r--r-- services/sync/tests/unit/test_node_reassignment.js | 523
-rw-r--r-- services/sync/tests/unit/test_password_engine.js | 1257
-rw-r--r-- services/sync/tests/unit/test_password_store.js | 398
-rw-r--r-- services/sync/tests/unit/test_password_tracker.js | 248
-rw-r--r-- services/sync/tests/unit/test_password_validator.js | 176
-rw-r--r-- services/sync/tests/unit/test_postqueue.js | 985
-rw-r--r-- services/sync/tests/unit/test_prefs_engine.js | 134
-rw-r--r-- services/sync/tests/unit/test_prefs_store.js | 391
-rw-r--r-- services/sync/tests/unit/test_prefs_tracker.js | 93
-rw-r--r-- services/sync/tests/unit/test_records_crypto.js | 189
-rw-r--r-- services/sync/tests/unit/test_records_wbo.js | 85
-rw-r--r-- services/sync/tests/unit/test_resource.js | 554
-rw-r--r-- services/sync/tests/unit/test_resource_header.js | 63
-rw-r--r-- services/sync/tests/unit/test_resource_ua.js | 96
-rw-r--r-- services/sync/tests/unit/test_score_triggers.js | 151
-rw-r--r-- services/sync/tests/unit/test_service_attributes.js | 92
-rw-r--r-- services/sync/tests/unit/test_service_cluster.js | 61
-rw-r--r-- services/sync/tests/unit/test_service_detect_upgrade.js | 274
-rw-r--r-- services/sync/tests/unit/test_service_login.js | 224
-rw-r--r-- services/sync/tests/unit/test_service_startOver.js | 91
-rw-r--r-- services/sync/tests/unit/test_service_startup.js | 60
-rw-r--r-- services/sync/tests/unit/test_service_sync_401.js | 90
-rw-r--r-- services/sync/tests/unit/test_service_sync_locked.js | 47
-rw-r--r-- services/sync/tests/unit/test_service_sync_remoteSetup.js | 241
-rw-r--r-- services/sync/tests/unit/test_service_sync_specified.js | 150
-rw-r--r-- services/sync/tests/unit/test_service_sync_updateEnabledEngines.js | 587
-rw-r--r-- services/sync/tests/unit/test_service_verifyLogin.js | 118
-rw-r--r-- services/sync/tests/unit/test_service_wipeClient.js | 78
-rw-r--r-- services/sync/tests/unit/test_service_wipeServer.js | 240
-rw-r--r-- services/sync/tests/unit/test_status.js | 83
-rw-r--r-- services/sync/tests/unit/test_status_checkSetup.js | 26
-rw-r--r-- services/sync/tests/unit/test_sync_auth_manager.js | 1027
-rw-r--r-- services/sync/tests/unit/test_syncedtabs.js | 342
-rw-r--r-- services/sync/tests/unit/test_syncengine.js | 302
-rw-r--r-- services/sync/tests/unit/test_syncengine_sync.js | 1781
-rw-r--r-- services/sync/tests/unit/test_syncscheduler.js | 1195
-rw-r--r-- services/sync/tests/unit/test_tab_engine.js | 226
-rw-r--r-- services/sync/tests/unit/test_tab_provider.js | 64
-rw-r--r-- services/sync/tests/unit/test_tab_quickwrite.js | 204
-rw-r--r-- services/sync/tests/unit/test_tab_tracker.js | 371
-rw-r--r-- services/sync/tests/unit/test_telemetry.js | 1462
-rw-r--r-- services/sync/tests/unit/test_tracker_addChanged.js | 59
-rw-r--r-- services/sync/tests/unit/test_uistate.js | 324
-rw-r--r-- services/sync/tests/unit/test_utils_catch.js | 119
-rw-r--r-- services/sync/tests/unit/test_utils_deepEquals.js | 51
-rw-r--r-- services/sync/tests/unit/test_utils_deferGetSet.js | 50
-rw-r--r-- services/sync/tests/unit/test_utils_json.js | 95
-rw-r--r-- services/sync/tests/unit/test_utils_keyEncoding.js | 23
-rw-r--r-- services/sync/tests/unit/test_utils_lock.js | 76
-rw-r--r-- services/sync/tests/unit/test_utils_makeGUID.js | 44
-rw-r--r-- services/sync/tests/unit/test_utils_notify.js | 97
-rw-r--r-- services/sync/tests/unit/test_utils_passphrase.js | 45
-rw-r--r-- services/sync/tests/unit/xpcshell.toml | 304
-rw-r--r-- services/sync/tps/extensions/tps/api.js | 77
-rw-r--r-- services/sync/tps/extensions/tps/manifest.json | 23
-rw-r--r-- services/sync/tps/extensions/tps/resource/auth/fxaccounts.sys.mjs | 209
-rw-r--r-- services/sync/tps/extensions/tps/resource/logger.sys.mjs | 168
-rw-r--r-- services/sync/tps/extensions/tps/resource/modules/addons.sys.mjs | 93
-rw-r--r-- services/sync/tps/extensions/tps/resource/modules/bookmarkValidator.sys.mjs | 1063
-rw-r--r-- services/sync/tps/extensions/tps/resource/modules/bookmarks.sys.mjs | 833
-rw-r--r-- services/sync/tps/extensions/tps/resource/modules/formautofill.sys.mjs | 128
-rw-r--r-- services/sync/tps/extensions/tps/resource/modules/forms.sys.mjs | 205
-rw-r--r-- services/sync/tps/extensions/tps/resource/modules/history.sys.mjs | 158
-rw-r--r-- services/sync/tps/extensions/tps/resource/modules/passwords.sys.mjs | 187
-rw-r--r-- services/sync/tps/extensions/tps/resource/modules/prefs.sys.mjs | 122
-rw-r--r-- services/sync/tps/extensions/tps/resource/modules/tabs.sys.mjs | 92
-rw-r--r-- services/sync/tps/extensions/tps/resource/modules/windows.sys.mjs | 32
-rw-r--r-- services/sync/tps/extensions/tps/resource/quit.sys.mjs | 38
-rw-r--r-- services/sync/tps/extensions/tps/resource/tps.sys.mjs | 1583
-rw-r--r-- services/sync/tps/extensions/tps/schema.json | 1
217 files changed, 63204 insertions, 0 deletions
diff --git a/services/sync/SyncComponents.manifest b/services/sync/SyncComponents.manifest
new file mode 100644
index 0000000000..68c36cd80f
--- /dev/null
+++ b/services/sync/SyncComponents.manifest
@@ -0,0 +1,3 @@
+# Register resource aliases
+# (Note, for tests these are also set up in addResourceAlias)
+resource services-sync resource://gre/modules/services-sync/
diff --git a/services/sync/Weave.sys.mjs b/services/sync/Weave.sys.mjs
new file mode 100644
index 0000000000..05a7031a73
--- /dev/null
+++ b/services/sync/Weave.sys.mjs
@@ -0,0 +1,190 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+const lazy = {};
+ChromeUtils.defineESModuleGetters(lazy, {
+ CLIENT_NOT_CONFIGURED: "resource://services-sync/constants.sys.mjs",
+ FileUtils: "resource://gre/modules/FileUtils.sys.mjs",
+});
+
+XPCOMUtils.defineLazyPreferenceGetter(
+ lazy,
+ "syncUsername",
+ "services.sync.username"
+);
+
+/**
+ * Sync's XPCOM service.
+ *
+ * It is named "Weave" for historical reasons.
+ *
+ * It's worth noting how Sync is lazily loaded. We register a timer that
+ * loads Sync a few seconds after app startup. This is so Sync does not
+ * adversely affect application start time.
+ *
+ * If Sync is not configured, no extra Sync code is loaded. If an
+ * external component (say the UI) needs to interact with Sync, it
+ * should use the promise-based function whenLoaded() - something like the
+ * following:
+ *
+ * // 1. Grab a handle to the Sync XPCOM service.
+ * let service = Cc["@mozilla.org/weave/service;1"]
+ * .getService(Components.interfaces.nsISupports)
+ * .wrappedJSObject;
+ *
+ * // 2. Use the .then method of the promise.
+ * service.whenLoaded().then(() => {
+ * // You are free to interact with "Weave." objects.
+ * return;
+ * });
+ *
+ * And that's it! However, if you really want to avoid promises and do it
+ * old-school, then
+ *
+ * // 1. Get a reference to the service as done in (1) above.
+ *
+ * // 2. Check if the service has been initialized.
+ * if (service.ready) {
+ * // You are free to interact with "Weave." objects.
+ * return;
+ * }
+ *
+ * // 3. Install "ready" listener.
+ * Services.obs.addObserver(function onReady() {
+ * Services.obs.removeObserver(onReady, "weave:service:ready");
+ *
+ * // You are free to interact with "Weave." objects.
+ * }, "weave:service:ready", false);
+ *
+ * // 4. Trigger loading of Sync.
+ * service.ensureLoaded();
+ */
+export function WeaveService() {
+ this.wrappedJSObject = this;
+ this.ready = false;
+}
+
+WeaveService.prototype = {
+ classID: Components.ID("{74b89fb0-f200-4ae8-a3ec-dd164117f6de}"),
+
+ QueryInterface: ChromeUtils.generateQI([
+ "nsIObserver",
+ "nsISupportsWeakReference",
+ ]),
+
+ get Weave() {
+ const { Weave } = ChromeUtils.importESModule(
+ "resource://services-sync/main.sys.mjs"
+ );
+ return Weave;
+ },
+
+ ensureLoaded() {
+ // Side-effect of accessing the service is that it is instantiated.
+ this.Weave.Service;
+ },
+
+ whenLoaded() {
+ if (this.ready) {
+ return Promise.resolve();
+ }
+ let onReadyPromise = new Promise(resolve => {
+ Services.obs.addObserver(function onReady() {
+ Services.obs.removeObserver(onReady, "weave:service:ready");
+ resolve();
+ }, "weave:service:ready");
+ });
+ this.ensureLoaded();
+ return onReadyPromise;
+ },
+
+ init() {
+ // Force Weave service to load if it hasn't triggered from overlays
+ this.timer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
+ this.timer.initWithCallback(
+ {
+ notify: () => {
+ let isConfigured = false;
+ // We only load more if it looks like Sync is configured.
+ if (this.enabled) {
+ // We have an associated FxAccount. So, do a more thorough check.
+ // This will import a number of modules and thus increase memory
+ // accordingly. We could potentially copy code performed by
+ // this check into this file if our above code is yielding too
+ // many false positives.
+ var { Weave } = ChromeUtils.importESModule(
+ "resource://services-sync/main.sys.mjs"
+ );
+ isConfigured =
+ Weave.Status.checkSetup() != lazy.CLIENT_NOT_CONFIGURED;
+ }
+ if (isConfigured) {
+ this.ensureLoaded();
+ }
+ },
+ },
+ 10000,
+ Ci.nsITimer.TYPE_ONE_SHOT
+ );
+ },
+
+ /**
+ * Whether Sync appears to be enabled.
+ *
+ * This returns true if we have an associated FxA account and Sync is enabled.
+ *
+ * It does *not* perform a robust check to see if the client is working.
+ * For that, you'll want to check Weave.Status.checkSetup().
+ */
+ get enabled() {
+ return (
+ !!lazy.syncUsername &&
+ Services.prefs.getBoolPref("identity.fxaccounts.enabled")
+ );
+ },
+};
+
+export function AboutWeaveLog() {}
+AboutWeaveLog.prototype = {
+ classID: Components.ID("{d28f8a0b-95da-48f4-b712-caf37097be41}"),
+
+ QueryInterface: ChromeUtils.generateQI([
+ "nsIAboutModule",
+ "nsISupportsWeakReference",
+ ]),
+
+ getURIFlags(aURI) {
+ return 0;
+ },
+
+ newChannel(aURI, aLoadInfo) {
+ let dir = lazy.FileUtils.getDir("ProfD", ["weave", "logs"]);
+ try {
+ dir.create(Ci.nsIFile.DIRECTORY_TYPE, lazy.FileUtils.PERMS_DIRECTORY);
+ } catch (ex) {
+ if (ex.result != Cr.NS_ERROR_FILE_ALREADY_EXISTS) {
+ throw ex;
+ }
+ // Ignore the exception due to a directory that already exists.
+ }
+ let uri = Services.io.newFileURI(dir);
+ let channel = Services.io.newChannelFromURIWithLoadInfo(uri, aLoadInfo);
+
+ channel.originalURI = aURI;
+
+ // Ensure that the about page has the same privileges as a regular directory
+ // view. That way links to files can be opened. Make sure we use the correct
+ // origin attributes when creating the principal for accessing the
+ // about:sync-log data.
+ let principal = Services.scriptSecurityManager.createContentPrincipal(
+ uri,
+ aLoadInfo.originAttributes
+ );
+
+ channel.owner = principal;
+ return channel;
+ },
+};
diff --git a/services/sync/components.conf b/services/sync/components.conf
new file mode 100644
index 0000000000..e4f82b35b1
--- /dev/null
+++ b/services/sync/components.conf
@@ -0,0 +1,20 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+Classes = [
+ {
+ 'cid': '{74b89fb0-f200-4ae8-a3ec-dd164117f6de}',
+ 'contract_ids': ['@mozilla.org/weave/service;1'],
+ 'esModule': 'resource://services-sync/Weave.sys.mjs',
+ 'constructor': 'WeaveService',
+ },
+ {
+ 'cid': '{d28f8a0b-95da-48f4-b712-caf37097be41}',
+ 'contract_ids': ['@mozilla.org/network/protocol/about;1?what=sync-log'],
+ 'esModule': 'resource://services-sync/Weave.sys.mjs',
+ 'constructor': 'AboutWeaveLog',
+ },
+]
diff --git a/services/sync/docs/engines.rst b/services/sync/docs/engines.rst
new file mode 100644
index 0000000000..7a4fa721af
--- /dev/null
+++ b/services/sync/docs/engines.rst
@@ -0,0 +1,133 @@
+============================
+The Sync engines in the tree
+============================
+
+Unless otherwise specified, the engine implementations can be found
+`here <https://searchfox.org/mozilla-central/source/services/sync/modules/engines>`_
+
+Please read the :doc:`overview`.
+
+Clients
+=======
+
+The ``clients`` engine is a special engine in that it's invisible to the
+user and cannot be disabled - think of it as a "meta" engine. As such, it
+doesn't really have a sensible concept of ``store`` or ``tracker``.
+
+The engine is mainly responsible for keeping its own record current in the
+``clients`` collection. Some parts of Sync use this collection to know what
+other clients exist and when they last synced (although a lot of this is moving
+to using the Firefox Accounts devices).
+
+Clients also has the ability to handle ``commands`` - in short, some other
+client can write to this client's ``commands``, and when this client notices,
+it will execute the command. Commands aren't arbitrary; they must be
+understood by both sides to work. There are commands to "wipe"
+collections etc. In practice, this is used only by ``bookmarks`` when a device
+restores bookmarks - in that case, the restoring device will send a ``wipe``
+command to all other clients so that they take the new bookmarks instead of
+merging them.
+
+If not for this somewhat limited ``commands`` functionality, this engine could
+be considered deprecated and subsumed by FxA devices - but because we
+can't just remove support for commands and also do not have a plan for
+replacing them, the clients engine remains important.
+
+Bookmarks
+=========
+
+The ``bookmarks`` engine has changed so that it's tightly integrated with the
+``places`` database. Instead of an external ``tracker``, the tracking is
+integrated into Places. Each bookmark has a `syncStatus` and a
+`syncChangeCounter` and these are managed internally by Places. Sync then just
+queries for changed bookmarks by looking at these fields.
+
+Bookmarks is somewhat unique in that it needs to maintain a tree structure,
+which makes merging a challenge. The `dogear <https://github.com/mozilla/dogear>`_
+component (written in Rust and also used by the
+`application-services bookmarks component <https://github.com/mozilla/application-services/tree/main/components/places>`_)
+performs this merging.
+
+Bookmarks also pioneered the concept of a "mirror" - this is a database table
+which tracks exactly what is on the server. Because each sync only fetches
+changes from the server since the last sync, each sync does not supply every
+record on the server. However, the merging code does need to know what's on
+the server - so the mirror tracks this.
+
+History
+=======
+
+History is similar to bookmarks described above - it's closely integrated with
+places - but is less complex because there's no tree structure involved.
+
+One unique characteristic of history is that the engine takes steps to *not*
+upload everything - old profiles tend to have too much history to reasonably
+store and upload, so typically uploads are limited to the last 5000 visits.
+
+Logins
+======
+
+Logins has also been upgraded to be closely integrated with `Services.logins` -
+the logins component itself manages the metadata.
+
+Tabs
+====
+
+Tabs is a special engine in that there's no underlying storage at all - it
+both saves the currently open tabs from this device (which are enumerated
+every time it's updated) and lets other parts of Firefox know which tabs
+are open on other devices. There's no database - if we haven't synced yet we
+don't know what other tabs are open, and when we do know, the list is just
+stored in memory.
+
+The `SyncedTabs module <https://searchfox.org/mozilla-central/source/services/sync/modules/SyncedTabs.sys.mjs>`_
+is the main interface the browser uses to get the list of tabs from other
+devices.
+
+Add-ons
+=======
+
+Addons is still an "old school" engine, with a tracker and store which aren't
+closely integrated with the addon manager. As a result it's fairly complex and
+error prone - eg, it persists the "last known" state so it can know what to
+sync, where a better model would be for the addon manager to track the changes
+on Sync's behalf.
+
+It also attempts to sync themes etc. The future of this engine isn't clear given
+it doesn't work on mobile platforms.
+
+Addresses / Credit-Cards
+========================
+
+Addresses and Credit-cards have Sync functionality tightly bound with the
+store. Unlike other engines above, this engine has always been tightly bound,
+because it was written after we realized this tight-binding was a feature and
+not a bug.
+
+Technically these are two separate engines and collections. However, because the
+underlying storage uses a shared implementation, the syncing also uses a
+shared implementation - ie, the same logic is used for both - so we tend to
+treat them as a single engine in practice.
+
+As a result, only a shim is in the `services/sync/modules/engines/` directory,
+while the actual logic is
+`next to the storage implementation <https://searchfox.org/mozilla-central/source/toolkit/components/formautofill/FormAutofillSync.jsm>`_.
+
+This engine has a unique twist on the "mirror" concept described above -
+whenever a change is made to a field, the original value of the field is
+stored directly in the storage. This means that on the next sync, the value
+of the record on the server can be deduced, meaning a "3-way" merge can be
+done, so it can better tell the difference between local only, remote only, or
+conflicting changes.
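+
+For illustration, here is a rough sketch of the per-field three-way decision
+this enables (expressed in Rust; the real logic lives in the JS engine, and
+the conflict tie-break shown is illustrative rather than the engine's actual
+policy):
+
+.. code-block:: rust
+
+   /// Decide the merged value of one field given the last-known server
+   /// value (the "mirror"), the local value, and the incoming remote value.
+   fn merge_field(mirror: &str, local: &str, remote: &str) -> String {
+       if local == remote {
+           local.to_string() // both sides agree - nothing to do
+       } else if local == mirror {
+           remote.to_string() // only the remote copy changed
+       } else if remote == mirror {
+           local.to_string() // only the local copy changed
+       } else {
+           remote.to_string() // genuine conflict - illustrative tie-break
+       }
+   }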
+
+WebExt-Storage
+==============
+
+webext-storage is implemented in Rust and lives in
+`application services <https://github.com/mozilla/application-services/tree/main/components/webext-storage>`_
+and is vendored into the `addons code <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/webext_storage_bridge>`_ -
+note that this includes the storage *and* Sync code. The Sync engine itself
+is a shim in the sync directory.
+
+See the :doc:`rust-engines` document for more about how rust engines are
+integrated.
diff --git a/services/sync/docs/external.rst b/services/sync/docs/external.rst
new file mode 100644
index 0000000000..f7cebde32d
--- /dev/null
+++ b/services/sync/docs/external.rst
@@ -0,0 +1,8 @@
+==============
+External Links
+==============
+
+Some external links that might be of interest:
+
+* `Information about the server APIs <https://mozilla-services.readthedocs.io/en/latest/index.html>`_
+* `Some external Sync Client docs <https://mozilla-services.readthedocs.io/en/latest/sync/index.html>`_
diff --git a/services/sync/docs/index.rst b/services/sync/docs/index.rst
new file mode 100644
index 0000000000..37ce3c19a0
--- /dev/null
+++ b/services/sync/docs/index.rst
@@ -0,0 +1,17 @@
+====
+Sync
+====
+
+This documents the sync implementation inside mozilla-central. It assumes
+a general understanding of what Sync is and how it works at a high level - you
+can find `some external docs <https://mozilla-services.readthedocs.io/en/latest/sync/>`_
+which can help with this.
+
+.. toctree::
+ :maxdepth: 1
+
+ overview
+ engines
+ rust-engines
+ payload-evolution
+ external
diff --git a/services/sync/docs/overview.rst b/services/sync/docs/overview.rst
new file mode 100644
index 0000000000..e956090d70
--- /dev/null
+++ b/services/sync/docs/overview.rst
@@ -0,0 +1,81 @@
+====================
+Introduction to Sync
+====================
+
+This document is a brief introduction to how Sync is implemented in desktop Firefox.
+
+General, Historical, Anatomy of a Sync Engine
+=============================================
+
+This section describes how Sync used to work - and indeed, how much of it still
+does. While we discuss how this is slowly changing, this context is valuable.
+
+For any datatype which syncs, there tend to be three parts:
+
+Store
+-----
+
+The sync ``store`` interfaces with the actual Firefox desktop store. For example,
+in the ``passwords`` engine, the "store" is the layer that talks to
+``Services.logins``.
+
+Tracker
+-------
+
+The ``tracker`` is what knows that something should be synced. For example,
+when the user creates or updates a password, it is the tracker that knows
+we should sync now, and what particular password(s) should be updated.
+
+This is typically done via "observer" notifications - ``Services.logins``,
+``places`` etc all send specific notifications when certain events happen
+(and indeed, some of these were added for Sync's benefit).
+
+Engine
+------
+
+The ``engine`` ties it all together. It works with the ``store`` and
+``tracker`` and tracks its own metadata (eg, the timestamp of the passwords on
+the server), so it knows how to grab just the changed records and how to pass
+them off to the ``store`` so the actual underlying storage can be updated.
+
+All of the above parts were typically in the
+`services/sync/modules/engines <https://searchfox.org/mozilla-central/source/services/sync/modules/engines>`_
+directory, decoupled from the data they were syncing.
+
+
+The Future of Desktop-Specific Sync Engines
+===========================================
+
+The system described above reflects the fact that Sync was "bolted on" to
+Desktop Firefox relatively late - eg, the Sync ``store`` is decoupled from the
+actual ``store``. This has caused a number of problems - particularly around
+the ``tracker`` and the metadata used by the engine, and the fact that changes
+to the backing store were often made without remembering that Sync existed.
+
+Over the last few years, the Sync team has come to the conclusion that Sync
+support must be integrated much closer to the store itself. For example,
+``Services.logins`` should track when something has changed that would cause
+an item to be synced. It should also track the metadata for the store so that
+if (say) a corrupt database is recovered by creating a new, empty one, the
+metadata should also vanish so Sync knows something bad has happened and can
+recover.
+
+However, this is a slow process - currently the ``bookmarks``, ``history`` and
+``passwords`` legacy engines have been improved so more responsibility is taken
+by the stores. In all cases, for implementation reasons, the Sync
+implementation still has a ``store``, but it tends to be a thin wrapper around
+the actual underlying store.
+
+The Future of Cross-Platform Sync Engines
+=========================================
+
+There are a number of Sync engines implemented in Rust which live in the
+application-services repository. While these were often done for mobile
+platforms, the longer term hope is that they can be reused on Desktop.
+:doc:`engines` has more details on these.
+
+While no existing engines have been replaced with Rust-implemented engines,
+the webext-storage engine is implemented in Rust via application-services, so
+it uses almost none of the infrastructure described above.
+
+Hopefully over time we will find more Rust-implemented engines in Desktop.
diff --git a/services/sync/docs/payload-evolution.md b/services/sync/docs/payload-evolution.md
new file mode 100644
index 0000000000..e195ee545d
--- /dev/null
+++ b/services/sync/docs/payload-evolution.md
@@ -0,0 +1,168 @@
+# Handling the evolution of Sync payloads
+
+(Note that this document has been written in the format of an [application-services ADR](https://github.com/mozilla/application-services/blob/main/docs/adr/0000-use-markdown-architectural-decision-records.md)
+but the relevant teams decided that ultimately the best home for this doc is in mozilla-central)
+
+* Status: Accepted
+* Deciders: sync team, credentials management team
+* Date: 2023-03-15
+
+Technical Story:
+* https://github.com/mozilla/application-services/pull/5434
+* https://docs.google.com/document/d/1ToLOERA5HKzEzRVZNv6Ohv_2wZaujW69pVb1Kef2jNY
+
+## Context and Problem Statement
+
+Sync exists on all platforms (Desktop, Android, iOS), all channels (Nightly, Beta, Release, ESR) and is heavily used across all Firefox features.
+Whenever there are feature changes or requests that potentially involve schema changes, there are not a lot of good options to ensure sync doesn’t break for any specific client.
+Since sync data is synced from all channels, we need to make sure each client can handle the new data and that all channels can support the new schema.
+Issues like [credit card failing on android and desktop release channels due to schema change on desktop Nightly](https://bugzilla.mozilla.org/show_bug.cgi?id=1812235)
+are examples of such cases we can run into.
+This document describes our decision on how we will support payload evolution over time.
+
+Note that even though this document was originally written in the application-services repository, it should
+be considered to apply to all sync implementations, whether in that repository, in mozilla-central,
+or anywhere else.
+
+## Definitions
+
+* A "new" Firefox installation is a version of Firefox which has a change to a Sync payload which
+ is not yet understood by "recent" versions. The most common example would be a Nightly version
+ of Firefox with a new feature not yet on the release channel.
+
+* A "recent" Firefox installation is a version older than a "new" version, which does not understand
+ or have support for new features in "new" versions, but which we still want to support without
+ breakage and without the user perceiving data-loss. This is typically accepted to mean the
+  current ESR version or later, but taking into account the slow uptake when new ESRs are released.
+
+* An "old" version is any version before what we consider "recent".
+
+
+## Decision Drivers
+
+* It must be possible to change what data is carried by Sync to meet future product requirements.
+* Both desktop and mobile platforms must be considered.
+* We must not break "recent" Firefox installations when a "new" Firefox installation syncs, and vice-versa.
+* Round-tripping data from a "new" Firefox installation through a "recent" Firefox installation must not discard any of the new data, and vice-versa.
+* Some degree of breakage for "old" Firefox installations when "new" or "recent" Firefoxes sync
+ might be considered acceptable if absolutely necessary.
+* However, breakage of "new" or "recent" Firefoxes when an "old" version syncs is *not* acceptable.
+* Because such evolution should be rare, we do not want to set an up-front policy about locking out
+  "old" versions just because they might have a problem in the future. That is, we want to avoid
+  a policy that dictates versions more than (say) 2 years old will break when syncing "just in case".
+* Any solution to this must be achievable in a relatively short timeframe as we know of product
+ asks coming down the line which require this capability.
+
+## Considered Options
+
+* A backwards compatible schema policy, consisting of (a) having engines "round trip" data they
+ do not know about and (b) never changing the semantics of existing data.
+* A policy which prevents "recent" clients from syncing, or editing data, or other restrictions.
+* A formal schema-driven process.
+* Consider the sync payloads frozen and never change them.
+* Use separate collections for new data
+
+## Decision Outcome
+
+Chosen option: A backwards compatible schema policy because it is very flexible and the only option
+meeting the decision drivers.
+
+## Pros and Cons of the Options
+
+### A backwards compatible schema policy
+
+A summary of this option is a policy by which:
+
+* Every sync engine must arrange to persist any fields from the payload which it
+  does not understand. The next time that engine needs to upload that record to the storage server,
+  it must arrange to add all such "unknown" fields back into the payload (a
+  concrete sketch of this follows the list below).
+
+* Different engines must identify different locations where this might happen. For example, the
+ `passwords` engine would identify the "root" of the payload, `addresses` and `creditcards` would
+ identify the `entry` sub-object in the payload, while the history engine would probably identify
+ *both* the root of the payload and the `visits` array.
+
+* Fields cannot change type, nor be removed for a significant amount of time. This might mean
+ that "new" clients must support both new fields *and* fields which are considered deprecated
+ by these "new" clients because they are still used by "recent" versions.
+
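+To make the round-tripping in the first point concrete, here is a minimal
+sketch of how a Rust engine might model it with `serde`. The payload type and
+its known fields are hypothetical; the pattern of capturing unrecognised
+fields in a map and writing them back out is the important part:
+
+```rust
+use serde::{Deserialize, Serialize};
+use serde_json::{Map, Value};
+
+#[derive(Serialize, Deserialize)]
+struct PasswordPayload {
+    // Fields this version of Firefox understands.
+    hostname: String,
+    username: String,
+    password: String,
+    // Any other fields in the incoming JSON land here on deserialize, and
+    // `flatten` splices them back into the object on serialize - so fields
+    // added by "new" Firefoxes survive a round trip through this client.
+    #[serde(flatten)]
+    unknown_fields: Map<String, Value>,
+}
+```
+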
+The pros and cons:
+
+* Good, because it meets the requirements.
+
+* Good, because the initial set of work identified is relatively simple to implement (that work
+ specifically is to support the round-tripping of "unknown" fields, in the hope that by the
+ time actual schema changes are proposed, this round-trip capability will then be on all "recent"
+ versions)
+
+* Bad, because the inability to deprecate or change existing fields means that
+ some evolution tasks become complicated. For example, consider a hypothetical change where
+ we wanted to change from "street/city/state" fields into a free-form "address" field. New
+ Firefox versions would need to populate *both* new and old fields when writing to the server,
+  and handle the fact that only the old fields might be updated when they see an incoming
+  record written by a "recent" or "old" version of Firefox. However, this should be rare.
+
+* Bad, because it's not possible to prove a proposed change meets the requirements - the policy
+ is informal and requires good judgement as changes are proposed.
+
+### A policy which prevents "recent" clients from syncing, or editing data
+
+Proposals which fit into this category might have been implemented by (say) adding
+a version number to the schema, and if clients did not fully understand the schema it would
+either prevent syncing the record, or sync it but not allow editing it, or similar.
+
+This was rejected because:
+
+* The user would certainly perceive data-loss if we ignored the incoming data entirely.
+* If we still wanted older versions to "partially" see the record (eg, but disallow editing) we'd
+ still need most of the chosen option anyway - specifically, we could still never
+ deprecate fields etc.
+* The UI/UX of trying to explain to the user why they can't edit a record was deemed impossible
+ to do in a satisfactory way.
+* This would effectively penalize users who choose to use Nightly Firefoxes in any way. Simply
+  allowing a Nightly to sync would break Release/Mobile Firefox versions.
+
+### A formal schema-driven process.
+
+Ideally we could formally describe schemas, but we can't come up with anything here which
+works with the constraints of supporting older clients - we simply can't update older released
+Firefoxes so they know how to work with the new schemas. We also couldn't come up with a solution
+where a schema is downloaded dynamically which also allowed the *semantics* (as opposed to simply
+validity) of new fields to be described.
+
+### Consider the sync payloads frozen and never change them.
+
+A process where payloads are frozen was rejected because:
+
+* The most naive approach here would not meet the needs of Firefox in the future.
+
+* A complicated system where we started creating new payloads and new collections
+ (ie, freezing "old" schemas but then creating "new" schemas only understood by
+ newer clients) could not be conceived in a way that still met the requirements,
+ particularly around data-loss for older clients. For example, adding a credit-card
+  on a Nightly version but having it be completely unavailable on a release Firefox
+ isn't acceptable.
+
+### Use separate collections for new data
+
+We could store the new data in a separate collection. For example, define a
+`bookmarks2` collection where each record has the same GUID as its counterpart in `bookmarks`, alongside any new fields.
+Newer clients use both collections to sync.
+
+The pros and cons:
+
+* Good, because it allows newer clients to sync new data without affecting recent or older clients
+* Bad, because sync writes would lose atomicity without server changes.
+ We can currently write to a single collection in an atomic way, but don't have a way to write to multiple collections.
+* Bad, because the number of collections grows each time we want to add fields.
+* Bad, because it potentially leaks extra information to an attacker who gets access to the encrypted server records.
+  For example, if we added a new collection for a single field, the attacker could guess whether that
+  field was set based on the size of the encrypted record.
+* Bad, because it's difficult to handle nested data with this approach -
+  for example, adding a field to a visit inside a history record.
+* Bad, because it has the same issue with dependent data as the chosen solution.
+
+## Links
+
+* This document was originally [brain-stormed in this Google Docs document](https://docs.google.com/document/d/1ToLOERA5HKzEzRVZNv6Ohv_2wZaujW69pVb1Kef2jNY),
+ which may be of interest for historical context, but should not be considered part of this ADR.
diff --git a/services/sync/docs/rust-engines.rst b/services/sync/docs/rust-engines.rst
new file mode 100644
index 0000000000..af00fd6619
--- /dev/null
+++ b/services/sync/docs/rust-engines.rst
@@ -0,0 +1,37 @@
+================================
+How Rust Engines are implemented
+================================
+
+There are two main components to engines implemented in Rust:
+
+The bridged-engine
+==================
+
+Because Rust engines still need to work with the existing Sync infrastructure,
+there's the concept of a `bridged-engine <https://searchfox.org/mozilla-central/source/services/sync/modules/bridged_engine.sys.mjs>`_.
+In short, this is just a shim between the existing
+`Sync Service <https://searchfox.org/mozilla-central/source/services/sync/modules/service.sys.mjs>`_
+and the Rust code.
+
+The bridge
+==========
+
+`"Golden Gate" <https://searchfox.org/mozilla-central/source/services/sync/golden_gate>`_
+is a utility to help bridge any Rust implemented Sync engines with desktop. In
+other words, it's a "rusty bridge" - get it? Get it? Yet another of Lina's puns
+that live on!
+
+One of the key challenges with integrating a Rust Sync component with desktop
+is the different threading models. The Rust code tends to be synchronous -
+most functions block the calling thread to do the disk or network IO necessary
+to work - it assumes that the consumer will delegate this to some other thread.
+
+So golden_gate is this background thread delegation for a Rust Sync engine -
+gecko calls golden-gate on the main thread, it marshalls the call to a worker
+thread, and the result is marshalled back to the main thread.
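+
+As a rough sketch of what that delegation looks like (modelled on the
+`moz_task` usage in `golden_gate/src/log.rs`; the task type and its payload
+here are hypothetical):
+
+.. code-block:: rust
+
+   use moz_task::{Task, TaskRunnable};
+   use nserror::nsresult;
+
+   /// Work captured on the main thread and run on a background thread.
+   struct PingTask {
+       message: String,
+   }
+
+   impl Task for PingTask {
+       fn run(&self) {
+           // Runs on the background thread, so it is free to block on IO.
+           println!("syncing: {}", self.message);
+       }
+
+       fn done(&self) -> Result<(), nsresult> {
+           // Runs back on the origin thread once `run` has finished.
+           Ok(())
+       }
+   }
+
+   // On the main thread: wrap the task in a runnable and dispatch it to a
+   // background event target, e.g.
+   // let runnable = TaskRunnable::new("example::ping", Box::new(task))?;
+   // TaskRunnable::dispatch(runnable, target)?;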
+
+It's worth noting that golden_gate is just for the Sync engine part - other
+parts of the component (ie, the parts that provide the functionality that's not
+Sync related) will have their own mechanisms for this. For example, the
+`webext-storage bridge <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/webext_storage_bridge/src>`_
+uses a similar technique `which has some in-depth documentation <../../toolkit/components/extensions/webextensions/webext-storage.html>`_.
diff --git a/services/sync/golden_gate/Cargo.toml b/services/sync/golden_gate/Cargo.toml
new file mode 100644
index 0000000000..3f94e1a1e9
--- /dev/null
+++ b/services/sync/golden_gate/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "golden_gate"
+description = "A bridge for wiring up Sync engines implemented in Rust"
+version = "0.1.0"
+authors = ["The Firefox Sync Developers <sync-team@mozilla.com>"]
+edition = "2018"
+license = "MPL-2.0"
+
+[dependencies]
+anyhow = "1"
+atomic_refcell = "0.1"
+cstr = "0.2"
+interrupt-support = "0.1"
+log = "0.4"
+moz_task = { path = "../../../xpcom/rust/moz_task" }
+nserror = { path = "../../../xpcom/rust/nserror" }
+nsstring = { path = "../../../xpcom/rust/nsstring" }
+serde_json = "1"
+storage_variant = { path = "../../../storage/variant" }
+sync15 = "0.1"
+xpcom = { path = "../../../xpcom/rust/xpcom" }
+
+[dependencies.thin-vec]
+version = "0.2.1"
+features = ["gecko-ffi"]
diff --git a/services/sync/golden_gate/src/error.rs b/services/sync/golden_gate/src/error.rs
new file mode 100644
index 0000000000..373d20756e
--- /dev/null
+++ b/services/sync/golden_gate/src/error.rs
@@ -0,0 +1,71 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use std::{error, fmt, result, str::Utf8Error};
+
+use nserror::{nsresult, NS_ERROR_INVALID_ARG, NS_ERROR_UNEXPECTED};
+use serde_json::Error as JsonError;
+
+/// A specialized `Result` type for Golden Gate.
+pub type Result<T> = result::Result<T, Error>;
+
+/// The error type for Golden Gate errors.
+#[derive(Debug)]
+pub enum Error {
+ /// A wrapped XPCOM error.
+ Nsresult(nsresult),
+
+ /// A ferry didn't run on the background task queue.
+ DidNotRun(&'static str),
+
+ /// A string contains invalid UTF-8 or JSON.
+ MalformedString(Box<dyn error::Error + Send + Sync + 'static>),
+}
+
+impl error::Error for Error {
+ fn source(&self) -> Option<&(dyn error::Error + 'static)> {
+ match self {
+ Error::MalformedString(error) => Some(error.as_ref()),
+ _ => None,
+ }
+ }
+}
+
+impl fmt::Display for Error {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ Error::Nsresult(result) => write!(f, "Operation failed with {}", result.error_name()),
+ Error::DidNotRun(what) => write!(f, "Failed to run `{what}` on background thread"),
+ Error::MalformedString(error) => error.fmt(f),
+ }
+ }
+}
+
+impl From<nsresult> for Error {
+ fn from(result: nsresult) -> Error {
+ Error::Nsresult(result)
+ }
+}
+
+impl From<Utf8Error> for Error {
+ fn from(error: Utf8Error) -> Error {
+ Error::MalformedString(error.into())
+ }
+}
+
+impl From<JsonError> for Error {
+ fn from(error: JsonError) -> Error {
+ Error::MalformedString(error.into())
+ }
+}
+
+impl From<Error> for nsresult {
+ fn from(error: Error) -> nsresult {
+ match error {
+ Error::DidNotRun(_) => NS_ERROR_UNEXPECTED,
+ Error::Nsresult(result) => result,
+ Error::MalformedString(_) => NS_ERROR_INVALID_ARG,
+ }
+ }
+}
diff --git a/services/sync/golden_gate/src/ferry.rs b/services/sync/golden_gate/src/ferry.rs
new file mode 100644
index 0000000000..99994811ab
--- /dev/null
+++ b/services/sync/golden_gate/src/ferry.rs
@@ -0,0 +1,74 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use nsstring::nsCString;
+use storage_variant::VariantType;
+use sync15::Guid;
+use xpcom::{interfaces::nsIVariant, RefPtr};
+
+/// An operation that runs on the background thread, and optionally passes a
+/// result to its callback.
+pub enum Ferry {
+ LastSync,
+ SetLastSync(i64),
+ SyncId,
+ ResetSyncId,
+ EnsureCurrentSyncId(String),
+ SyncStarted,
+ StoreIncoming(Vec<nsCString>),
+ SetUploaded(i64, Vec<Guid>),
+ SyncFinished,
+ Reset,
+ Wipe,
+}
+
+impl Ferry {
+ /// Returns the operation name for debugging and labeling the task
+ /// runnable.
+ pub fn name(&self) -> &'static str {
+ match self {
+ Ferry::LastSync => concat!(module_path!(), "getLastSync"),
+ Ferry::SetLastSync(_) => concat!(module_path!(), "setLastSync"),
+ Ferry::SyncId => concat!(module_path!(), "getSyncId"),
+ Ferry::ResetSyncId => concat!(module_path!(), "resetSyncId"),
+ Ferry::EnsureCurrentSyncId(_) => concat!(module_path!(), "ensureCurrentSyncId"),
+ Ferry::SyncStarted => concat!(module_path!(), "syncStarted"),
+ Ferry::StoreIncoming { .. } => concat!(module_path!(), "storeIncoming"),
+ Ferry::SetUploaded { .. } => concat!(module_path!(), "setUploaded"),
+ Ferry::SyncFinished => concat!(module_path!(), "syncFinished"),
+ Ferry::Reset => concat!(module_path!(), "reset"),
+ Ferry::Wipe => concat!(module_path!(), "wipe"),
+ }
+ }
+}
+
+/// The result of a ferry task, sent from the background thread back to the
+/// main thread. Results are converted to variants, and passed as arguments to
+/// `mozIBridgedSyncEngineCallback`s.
+pub enum FerryResult {
+ LastSync(i64),
+ SyncId(Option<String>),
+ AssignedSyncId(String),
+ Null,
+}
+
+impl Default for FerryResult {
+ fn default() -> Self {
+ FerryResult::Null
+ }
+}
+
+impl FerryResult {
+ /// Converts the result to an `nsIVariant` that can be passed as an
+ /// argument to `callback.handleResult()`.
+ pub fn into_variant(self) -> RefPtr<nsIVariant> {
+ match self {
+ FerryResult::LastSync(v) => v.into_variant(),
+ FerryResult::SyncId(Some(v)) => nsCString::from(v).into_variant(),
+ FerryResult::SyncId(None) => ().into_variant(),
+ FerryResult::AssignedSyncId(v) => nsCString::from(v).into_variant(),
+ FerryResult::Null => ().into_variant(),
+ }
+ }
+}
diff --git a/services/sync/golden_gate/src/lib.rs b/services/sync/golden_gate/src/lib.rs
new file mode 100644
index 0000000000..8da6524bd7
--- /dev/null
+++ b/services/sync/golden_gate/src/lib.rs
@@ -0,0 +1,119 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+//! **Golden Gate** 🌉 is a crate for bridging Desktop Sync to our suite of
+//! Rust sync and storage components. It connects Sync's `BridgedEngine` class
+//! to the Rust `BridgedEngine` trait via the `mozIBridgedSyncEngine` XPCOM
+//! interface.
+//!
+//! Due to limitations in implementing XPCOM interfaces for generic types,
+//! Golden Gate doesn't implement `mozIBridgedSyncEngine` directly. Instead,
+//! it provides helpers, called "ferries", for passing Sync records between
+//! JavaScript and Rust. The ferries also handle threading and type
+//! conversions.
+//!
+//! Here's a step-by-step guide for adding a new Rust Sync engine to Firefox.
+//!
+//! ## Step 1: Create your (XPCOM) bridge
+//!
+//! In your consuming crate, define a type for your `mozIBridgedSyncEngine`
+//! implementation. We'll call this type the **brige**. The bridge is
+//! responsible for exposing your Sync engine to XPIDL [^1], in a way that lets
+//! JavaScript call it.
+//!
+//! For your bridge type, you'll need to implement an XPCOM interface with the
+//! `#[xpcom(implement(mozIBridgedSyncEngine), nonatomic)]` attribute, then
+//! define `xpcom_method!()` stubs for the `mozIBridgedSyncEngine` methods. For
+//! more details about implementing XPCOM methods in Rust, check out the docs in
+//! `xpcom/rust/xpcom/src/method.rs`.
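+//!
+//! Very roughly, the stubs look something like the following - the exact
+//! macro syntax is documented in `method.rs`, and the method shown is just
+//! one of the interface's methods:
+//!
+//! ```ignore
+//! #[xpcom(implement(mozIBridgedSyncEngine), nonatomic)]
+//! pub struct MyBridge {
+//!     // A handle to your Rust engine, plus (see step 2) a task queue.
+//! }
+//!
+//! impl MyBridge {
+//!     // Expands into the XPCOM-visible `EnsureCurrentSyncId`, which
+//!     // forwards to the snake_case Rust method below.
+//!     xpcom_method!(ensure_current_sync_id => EnsureCurrentSyncId(
+//!         new_sync_id: *const nsACString
+//!     ));
+//!     fn ensure_current_sync_id(&self, new_sync_id: &nsACString) -> Result<(), nsresult> {
+//!         // Dispatch a ferry to the background queue here (steps 2-4).
+//!         Ok(())
+//!     }
+//! }
+//! ```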
+//!
+//! You'll also need to add an entry for your bridge type to `components.conf`,
+//! and define C++ and Rust constructors for it, so that JavaScript code can
+//! create instances of it. Check out `NS_NewWebExtStorage` (and, in C++,
+//! `mozilla::extensions::storageapi::NewWebExtStorage`) and
+//! `NS_NewSyncedBookmarksMerger` (`mozilla::places::NewSyncedBookmarksMerger`
+//! in C++) for how to do this.
+//!
+//! [^1]: You can think of XPIDL as a souped-up C FFI, with richer types and a
+//! degree of type safety.
+//!
+//! ## Step 2: Add a background task queue to your bridge
+//!
+//! A task queue lets your engine do I/O, merging, and other syncing tasks on a
+//! background thread pool. This is important because database reads and writes
+//! can take an unpredictable amount of time. Doing these on the main thread can
+//! cause jank, and, in the worst case, lock up the browser UI for seconds at a
+//! time.
+//!
+//! The `moz_task` crate provides a `create_background_task_queue` function to
+//! do this. Once you have a queue, you can use it to call into your Rust
+//! engine. Golden Gate takes care of ferrying arguments back and forth across
+//! the thread boundary.
+//!
+//! Since it's a queue, ferries arrive in the order they're scheduled, so
+//! your engine's `store_incoming` method will always be called before `apply`,
+//! which is likewise called before `set_uploaded`. The thread manager scales
+//! the pool for you; you don't need to create or manage your own threads.
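+//!
+//! A minimal sketch of creating the queue (assuming the `moz_task` API as
+//! used elsewhere in this tree; error handling elided):
+//!
+//! ```ignore
+//! // One serial queue per bridge, created once and reused for all ferries.
+//! let queue = moz_task::create_background_task_queue(cstr!("MyEngineSyncer"))?;
+//! // Later: wrap each ferry in a `TaskRunnable` and dispatch it to `queue`.
+//! ```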
+//!
+//! ## Step 3: Create your Rust engine
+//!
+//! Next, you'll need to implement the Rust side of the bridge. This is a type
+//! that implements the `BridgedEngine` trait.
+//!
+//! Bridged engines handle storing incoming Sync records, merging changes,
+//! resolving conflicts, and fetching outgoing records for upload. Under the
+//! hood, your engine will hold either a database connection directly, or
+//! another object that does.
+//!
+//! Although outside the scope of Golden Gate, your engine will also likely
+//! expose a data storage API, for fetching, updating, and deleting items
+//! locally. Golden Gate provides the syncing layer on top of this local store.
+//!
+//! A `BridgedEngine` itself doesn't need to be `Send` or `Sync`, but the
+//! ferries require both, since they're calling into your bridge on the
+//! background task queue.
+//!
+//! In practice, this means your bridge will need to hold a thread-safe owned
+//! reference to the engine, via `Arc<Mutex<BridgedEngine>>`. In fact, this
+//! pattern is so common that Golden Gate implements `BridgedEngine` for any
+//! `Mutex<BridgedEngine>`, which automatically locks the mutex before calling
+//! into the engine.
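+//!
+//! In sketch form (the engine type here is hypothetical):
+//!
+//! ```ignore
+//! struct MyEngine { /* database connection, etc. */ }
+//! impl BridgedEngine for MyEngine { /* ... */ }
+//!
+//! // The bridge holds a thread-safe handle; each ferry locks the mutex on
+//! // the background queue before calling into the engine.
+//! let engine = Arc::new(Mutex::new(MyEngine::new()));
+//! let for_ferries = Arc::clone(&engine);
+//! ```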
+//!
+//! ## Step 4: Connect the bridge to the JavaScript and Rust sides
+//!
+//! On the JavaScript side, you'll need to subclass Sync's `BridgedEngine`
+//! class, and give it a handle to your XPCOM bridge. The base class has all the
+//! machinery for hooking up any `mozIBridgedSyncEngine` implementation so that
+//! Sync can drive it.
+//!
+//! On the Rust side, each `mozIBridgedSyncEngine` method should create a
+//! Golden Gate ferry, and dispatch it to the background task queue. The
+//! ferries correspond to the method names. For example, `ensureCurrentSyncId`
+//! should create a `Ferry::EnsureCurrentSyncId(...)`; `storeIncoming`, a
+//! `Ferry::StoreIncoming(...)`; and so on. This is mostly boilerplate.
+//!
+//! And that's it! Each ferry will, in turn, call into your Rust
+//! `BridgedEngine`, and send the results back to JavaScript.
+//!
+//! For an example of how all this works, including exposing a storage (not
+//! just syncing!) API to JS via XPIDL, check out `webext_storage::Bridge` for
+//! the `storage.sync` API!
+
+#[macro_use]
+extern crate cstr;
+
+pub mod error;
+mod ferry;
+pub mod log;
+pub mod task;
+
+pub use crate::log::LogSink;
+pub use error::{Error, Result};
+// Re-export items from `interrupt-support` and `sync15`, so that
+// consumers of `golden_gate` don't have to depend on them.
+pub use interrupt_support::{Interrupted, Interruptee};
+pub use sync15::bso::{IncomingBso, OutgoingBso};
+pub use sync15::engine::{ApplyResults, BridgedEngine};
+pub use sync15::Guid;
+pub use task::{ApplyTask, FerryTask};
diff --git a/services/sync/golden_gate/src/log.rs b/services/sync/golden_gate/src/log.rs
new file mode 100644
index 0000000000..de7fd0dfc3
--- /dev/null
+++ b/services/sync/golden_gate/src/log.rs
@@ -0,0 +1,161 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use std::fmt::{self, Write};
+
+use log::{Level, LevelFilter, Log, Metadata, Record};
+use moz_task::{Task, TaskRunnable, ThreadPtrHandle, ThreadPtrHolder};
+use nserror::nsresult;
+use nsstring::nsString;
+use xpcom::{interfaces::mozIServicesLogSink, RefPtr};
+
+pub struct LogSink {
+ pub max_level: LevelFilter,
+ logger: Option<ThreadPtrHandle<mozIServicesLogSink>>,
+}
+
+impl Default for LogSink {
+ fn default() -> Self {
+ LogSink {
+ max_level: LevelFilter::Off,
+ logger: None,
+ }
+ }
+}
+
+impl LogSink {
+ /// Creates a log sink that adapts the Rust `log` crate to the Sync
+ /// `Log.sys.mjs` logger.
+ ///
+ /// This is copied from `bookmark_sync::Logger`. It would be nice to share
+ /// these, but, for now, we've just duplicated it to make prototyping
+ /// easier.
+ #[inline]
+ pub fn new(max_level: LevelFilter, logger: ThreadPtrHandle<mozIServicesLogSink>) -> LogSink {
+ LogSink {
+ max_level,
+ logger: Some(logger),
+ }
+ }
+
+ /// Creates a log sink using the given Services `logger` as the
+ /// underlying implementation. The `logger` will always be called
+ /// asynchronously on its owning thread; it doesn't need to be
+ /// thread-safe.
+ pub fn with_logger(logger: Option<&mozIServicesLogSink>) -> Result<LogSink, nsresult> {
+ Ok(if let Some(logger) = logger {
+ // Fetch the maximum log level while we're on the main thread, so
+ // that `LogSink::enabled()` can check it while on the background
+ // thread. Otherwise, we'd need to dispatch a `LogTask` for every
+ // log message, only to discard most of them when the task calls
+ // into the logger on the main thread.
+ let mut raw_max_level = 0i16;
+ let rv = unsafe { logger.GetMaxLevel(&mut raw_max_level) };
+ let max_level = if rv.succeeded() {
+ match raw_max_level {
+ mozIServicesLogSink::LEVEL_ERROR => LevelFilter::Error,
+ mozIServicesLogSink::LEVEL_WARN => LevelFilter::Warn,
+ mozIServicesLogSink::LEVEL_DEBUG => LevelFilter::Debug,
+ mozIServicesLogSink::LEVEL_TRACE => LevelFilter::Trace,
+ mozIServicesLogSink::LEVEL_INFO => LevelFilter::Info,
+ _ => LevelFilter::Off,
+ }
+ } else {
+ LevelFilter::Off
+ };
+ LogSink::new(
+ max_level,
+ ThreadPtrHolder::new(cstr!("mozIServicesLogSink"), RefPtr::new(logger))?,
+ )
+ } else {
+ LogSink::default()
+ })
+ }
+
+ /// Returns a reference to the underlying `mozIServicesLogSink`.
+ pub fn logger(&self) -> Option<&mozIServicesLogSink> {
+ self.logger.as_ref().and_then(|l| l.get())
+ }
+
+ /// Logs a message to the Sync logger, if one is set. This would be better
+ /// implemented as a macro, as Dogear does, so that we can pass variadic
+    /// arguments without manually invoking `format_args!()` every time we want
+ /// to log a message.
+ ///
+ /// The `log` crate's macros aren't suitable here, because those log to the
+ /// global logger. However, we don't want to set the global logger in our
+ /// crate, because that will log _everything_ that uses the Rust `log` crate
+ /// to the Sync logs, including WebRender and audio logging.
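+    ///
+    /// For example (illustrative; assumes a `LogSink` named `log` in scope):
+    ///
+    /// ```ignore
+    /// log.debug(format_args!("applied {} incoming records", count));
+    /// ```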
+ pub fn debug(&self, args: fmt::Arguments) {
+ let meta = Metadata::builder()
+ .level(Level::Debug)
+ .target(module_path!())
+ .build();
+ if self.enabled(&meta) {
+ self.log(&Record::builder().args(args).metadata(meta).build());
+ }
+ }
+}
+
+impl Log for LogSink {
+ #[inline]
+ fn enabled(&self, meta: &Metadata) -> bool {
+ self.logger.is_some() && meta.level() <= self.max_level
+ }
+
+ fn log(&self, record: &Record) {
+ if !self.enabled(record.metadata()) {
+ return;
+ }
+ if let Some(logger) = &self.logger {
+ let mut message = nsString::new();
+ if write!(message, "{}", record.args()).is_ok() {
+ let task = LogTask {
+ logger: logger.clone(),
+ level: record.metadata().level(),
+ message,
+ };
+                let _ = TaskRunnable::new("golden_gate::log::LogSink::log", Box::new(task))
+ .and_then(|r| TaskRunnable::dispatch(r, logger.owning_thread()));
+ }
+ }
+ }
+
+ fn flush(&self) {}
+}
+
+/// Logs a message to the mirror logger. This task is created on the background
+/// thread queue, and dispatched to the main thread.
+struct LogTask {
+ logger: ThreadPtrHandle<mozIServicesLogSink>,
+ level: Level,
+ message: nsString,
+}
+
+impl Task for LogTask {
+ fn run(&self) {
+ let logger = self.logger.get().unwrap();
+ match self.level {
+ Level::Error => unsafe {
+ logger.Error(&*self.message);
+ },
+ Level::Warn => unsafe {
+ logger.Warn(&*self.message);
+ },
+ Level::Debug => unsafe {
+ logger.Debug(&*self.message);
+ },
+ Level::Trace => unsafe {
+ logger.Trace(&*self.message);
+ },
+ Level::Info => unsafe {
+ logger.Info(&*self.message);
+ },
+ }
+ }
+
+ fn done(&self) -> Result<(), nsresult> {
+ Ok(())
+ }
+}
diff --git a/services/sync/golden_gate/src/task.rs b/services/sync/golden_gate/src/task.rs
new file mode 100644
index 0000000000..8cab21830b
--- /dev/null
+++ b/services/sync/golden_gate/src/task.rs
@@ -0,0 +1,355 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use std::{fmt::Write, mem, result};
+
+use atomic_refcell::AtomicRefCell;
+use moz_task::{DispatchOptions, Task, TaskRunnable, ThreadPtrHandle, ThreadPtrHolder};
+use nserror::{nsresult, NS_ERROR_FAILURE};
+use nsstring::{nsACString, nsCString};
+use sync15::engine::{ApplyResults, BridgedEngine};
+use sync15::Guid;
+use thin_vec::ThinVec;
+use xpcom::{
+ interfaces::{
+ mozIBridgedSyncEngineApplyCallback, mozIBridgedSyncEngineCallback, nsIEventTarget,
+ },
+ RefPtr,
+};
+
+use crate::error::{Error, Result};
+use crate::ferry::{Ferry, FerryResult};
+
+/// A ferry task sends (or ferries) an operation to a bridged engine on a
+/// background thread or task queue, and ferries back an optional result to
+/// a callback.
+pub struct FerryTask {
+ /// We want to ensure scheduled ferries can't block finalization of the underlying
+ /// store - we want a degree of confidence that closing the database will happen when
+ /// we want even if tasks are queued up to run on another thread.
+ /// We rely on the semantics of our BridgedEngines to help here:
+ /// * A bridged engine is expected to hold a weak reference to its store.
+ /// * Our LazyStore is the only thing holding a reference to the "real" store.
+ /// Thus, when our LazyStore asks our "real" store to close, we can be confident
+    /// a close will happen (ie, we assume that the real store will be able to unwrap
+    /// the underlying sqlite `Connection` (using `Arc::try_unwrap`) and close it).
+ /// However, note that if an operation on the bridged engine is currently running,
+ /// we will block waiting for that operation to complete, so while this isn't
+ /// guaranteed to happen immediately, it should happen "soon enough".
+ engine: Box<dyn BridgedEngine>,
+ ferry: Ferry,
+ callback: ThreadPtrHandle<mozIBridgedSyncEngineCallback>,
+ result: AtomicRefCell<anyhow::Result<FerryResult>>,
+}
+
+impl FerryTask {
+ /// Creates a task to fetch the engine's last sync time, in milliseconds.
+ #[inline]
+ pub fn for_last_sync(
+ engine: Box<dyn BridgedEngine>,
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<FerryTask> {
+ Self::with_ferry(engine, Ferry::LastSync, callback)
+ }
+
+ /// Creates a task to set the engine's last sync time, in milliseconds.
+ #[inline]
+ pub fn for_set_last_sync(
+ engine: Box<dyn BridgedEngine>,
+ last_sync_millis: i64,
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<FerryTask> {
+ Self::with_ferry(engine, Ferry::SetLastSync(last_sync_millis), callback)
+ }
+
+ /// Creates a task to fetch the engine's sync ID.
+ #[inline]
+ pub fn for_sync_id(
+ engine: Box<dyn BridgedEngine>,
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<FerryTask> {
+ Self::with_ferry(engine, Ferry::SyncId, callback)
+ }
+
+ /// Creates a task to reset the engine's sync ID and all its local Sync
+ /// metadata.
+ #[inline]
+ pub fn for_reset_sync_id(
+ engine: Box<dyn BridgedEngine>,
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<FerryTask> {
+ Self::with_ferry(engine, Ferry::ResetSyncId, callback)
+ }
+
+ /// Creates a task to compare the bridged engine's local sync ID with
+ /// the `new_sync_id` from `meta/global`, and ferry back the final sync ID
+ /// to use.
+ #[inline]
+ pub fn for_ensure_current_sync_id(
+ engine: Box<dyn BridgedEngine>,
+ new_sync_id: &nsACString,
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<FerryTask> {
+ Self::with_ferry(
+ engine,
+ Ferry::EnsureCurrentSyncId(std::str::from_utf8(new_sync_id)?.into()),
+ callback,
+ )
+ }
+
+ /// Creates a task to signal that the engine is about to sync.
+ #[inline]
+ pub fn for_sync_started(
+ engine: Box<dyn BridgedEngine>,
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<FerryTask> {
+ Self::with_ferry(engine, Ferry::SyncStarted, callback)
+ }
+
+ /// Creates a task to store incoming records.
+ pub fn for_store_incoming(
+ engine: Box<dyn BridgedEngine>,
+ incoming_envelopes_json: &[nsCString],
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<FerryTask> {
+ Self::with_ferry(
+ engine,
+ Ferry::StoreIncoming(incoming_envelopes_json.to_vec()),
+ callback,
+ )
+ }
+
+ /// Creates a task to mark a subset of outgoing records as uploaded. This
+ /// may be called multiple times per sync, or not at all if there are no
+ /// records to upload.
+ pub fn for_set_uploaded(
+ engine: Box<dyn BridgedEngine>,
+ server_modified_millis: i64,
+ uploaded_ids: &[nsCString],
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<FerryTask> {
+ let uploaded_ids = uploaded_ids.iter().map(|id| Guid::from_slice(id)).collect();
+ Self::with_ferry(
+ engine,
+ Ferry::SetUploaded(server_modified_millis, uploaded_ids),
+ callback,
+ )
+ }
+
+ /// Creates a task to signal that all records have been uploaded, and
+ /// the engine has been synced. This is called even if there were no
+ /// records uploaded.
+ #[inline]
+ pub fn for_sync_finished(
+ engine: Box<dyn BridgedEngine>,
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<FerryTask> {
+ Self::with_ferry(engine, Ferry::SyncFinished, callback)
+ }
+
+ /// Creates a task to reset all local Sync state for the engine, without
+ /// erasing user data.
+ #[inline]
+ pub fn for_reset(
+ engine: Box<dyn BridgedEngine>,
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<FerryTask> {
+ Self::with_ferry(engine, Ferry::Reset, callback)
+ }
+
+ /// Creates a task to erase all local user data for the engine.
+ #[inline]
+ pub fn for_wipe(
+ engine: Box<dyn BridgedEngine>,
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<FerryTask> {
+ Self::with_ferry(engine, Ferry::Wipe, callback)
+ }
+
+ /// Creates a task for a ferry. The `callback` is bound to the current
+ /// thread, and will be called once, after the ferry returns from the
+ /// background thread.
+ fn with_ferry(
+ engine: Box<dyn BridgedEngine>,
+ ferry: Ferry,
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<FerryTask> {
+ let name = ferry.name();
+ Ok(FerryTask {
+ engine,
+ ferry,
+ callback: ThreadPtrHolder::new(
+ cstr!("mozIBridgedSyncEngineCallback"),
+ RefPtr::new(callback),
+ )?,
+ result: AtomicRefCell::new(Err(Error::DidNotRun(name).into())),
+ })
+ }
+
+ /// Dispatches the task to the given thread `target`.
+ pub fn dispatch(self, target: &nsIEventTarget) -> Result<()> {
+ let runnable = TaskRunnable::new(self.ferry.name(), Box::new(self))?;
+ // `may_block` schedules the task on the I/O thread pool, since we
+ // expect most operations to wait on I/O.
+ TaskRunnable::dispatch_with_options(
+ runnable,
+ target,
+ DispatchOptions::default().may_block(true),
+ )?;
+ Ok(())
+ }
+
+ /// Runs the task on the background thread. This is split out into its own
+ /// method to make error handling easier.
+ fn inner_run(&self) -> anyhow::Result<FerryResult> {
+ let engine = &self.engine;
+ Ok(match &self.ferry {
+ Ferry::LastSync => FerryResult::LastSync(engine.last_sync()?),
+ Ferry::SetLastSync(last_sync_millis) => {
+ engine.set_last_sync(*last_sync_millis)?;
+ FerryResult::default()
+ }
+ Ferry::SyncId => FerryResult::SyncId(engine.sync_id()?),
+ Ferry::ResetSyncId => FerryResult::AssignedSyncId(engine.reset_sync_id()?),
+ Ferry::EnsureCurrentSyncId(new_sync_id) => {
+ FerryResult::AssignedSyncId(engine.ensure_current_sync_id(new_sync_id)?)
+ }
+ Ferry::SyncStarted => {
+ engine.sync_started()?;
+ FerryResult::default()
+ }
+ Ferry::StoreIncoming(incoming_envelopes_json) => {
+ let incoming_envelopes = incoming_envelopes_json
+ .iter()
+ .map(|envelope| Ok(serde_json::from_slice(envelope)?))
+ .collect::<Result<_>>()?;
+
+ engine.store_incoming(incoming_envelopes)?;
+ FerryResult::default()
+ }
+ Ferry::SetUploaded(server_modified_millis, uploaded_ids) => {
+ engine.set_uploaded(*server_modified_millis, uploaded_ids.as_slice())?;
+ FerryResult::default()
+ }
+ Ferry::SyncFinished => {
+ engine.sync_finished()?;
+ FerryResult::default()
+ }
+ Ferry::Reset => {
+ engine.reset()?;
+ FerryResult::default()
+ }
+ Ferry::Wipe => {
+ engine.wipe()?;
+ FerryResult::default()
+ }
+ })
+ }
+}
+
+impl Task for FerryTask {
+ fn run(&self) {
+ *self.result.borrow_mut() = self.inner_run();
+ }
+
+ fn done(&self) -> result::Result<(), nsresult> {
+ let callback = self.callback.get().unwrap();
+ match mem::replace(
+ &mut *self.result.borrow_mut(),
+ Err(Error::DidNotRun(self.ferry.name()).into()),
+ ) {
+ Ok(result) => unsafe { callback.HandleSuccess(result.into_variant().coerce()) },
+ Err(err) => {
+ let mut message = nsCString::new();
+ write!(message, "{err}").unwrap();
+ unsafe { callback.HandleError(NS_ERROR_FAILURE, &*message) }
+ }
+ }
+ .to_result()
+ }
+}
+
+/// An apply task ferries incoming records to an engine on a background
+/// thread, and ferries back records to upload. It's separate from
+/// `FerryTask` because its callback type is different.
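+///
+/// Usage mirrors `FerryTask`; as a sketch (assuming an `engine`, `callback`,
+/// and `task_queue` are in scope):
+///
+/// ```ignore
+/// ApplyTask::new(engine, callback)?.dispatch(&task_queue)?;
+/// ```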
+pub struct ApplyTask {
+ engine: Box<dyn BridgedEngine>,
+ callback: ThreadPtrHandle<mozIBridgedSyncEngineApplyCallback>,
+ result: AtomicRefCell<anyhow::Result<Vec<String>>>,
+}
+
+impl ApplyTask {
+ /// Returns the task name for debugging.
+ pub fn name() -> &'static str {
+        concat!(module_path!(), "::apply")
+ }
+
+ /// Runs the task on the background thread.
+ fn inner_run(&self) -> anyhow::Result<Vec<String>> {
+ let ApplyResults {
+ records: outgoing_records,
+ ..
+ } = self.engine.apply()?;
+ let outgoing_records_json = outgoing_records
+ .iter()
+ .map(|record| Ok(serde_json::to_string(record)?))
+ .collect::<Result<_>>()?;
+ Ok(outgoing_records_json)
+ }
+
+ /// Creates a task. The `callback` is bound to the current thread, and will
+ /// be called once, after the records are applied on the background thread.
+ pub fn new(
+ engine: Box<dyn BridgedEngine>,
+ callback: &mozIBridgedSyncEngineApplyCallback,
+ ) -> Result<ApplyTask> {
+ Ok(ApplyTask {
+ engine,
+ callback: ThreadPtrHolder::new(
+ cstr!("mozIBridgedSyncEngineApplyCallback"),
+ RefPtr::new(callback),
+ )?,
+ result: AtomicRefCell::new(Err(Error::DidNotRun(Self::name()).into())),
+ })
+ }
+
+ /// Dispatches the task to the given thread `target`.
+ pub fn dispatch(self, target: &nsIEventTarget) -> Result<()> {
+ let runnable = TaskRunnable::new(Self::name(), Box::new(self))?;
+ TaskRunnable::dispatch_with_options(
+ runnable,
+ target,
+ DispatchOptions::default().may_block(true),
+ )?;
+ Ok(())
+ }
+}
+
+impl Task for ApplyTask {
+ fn run(&self) {
+ *self.result.borrow_mut() = self.inner_run();
+ }
+
+ fn done(&self) -> result::Result<(), nsresult> {
+ let callback = self.callback.get().unwrap();
+ match mem::replace(
+ &mut *self.result.borrow_mut(),
+ Err(Error::DidNotRun(Self::name()).into()),
+ ) {
+ Ok(envelopes) => {
+ let result = envelopes
+ .into_iter()
+ .map(nsCString::from)
+ .collect::<ThinVec<_>>();
+ unsafe { callback.HandleSuccess(&result) }
+ }
+ Err(err) => {
+ let mut message = nsCString::new();
+ write!(message, "{err}").unwrap();
+ unsafe { callback.HandleError(NS_ERROR_FAILURE, &*message) }
+ }
+ }
+ .to_result()
+ }
+}
diff --git a/services/sync/modules-testing/fakeservices.sys.mjs b/services/sync/modules-testing/fakeservices.sys.mjs
new file mode 100644
index 0000000000..4fd7534bf1
--- /dev/null
+++ b/services/sync/modules-testing/fakeservices.sys.mjs
@@ -0,0 +1,114 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { Weave } from "resource://services-sync/main.sys.mjs";
+import { RawCryptoWrapper } from "resource://services-sync/record.sys.mjs";
+import { Utils } from "resource://services-sync/util.sys.mjs";
+
+export function FakeFilesystemService(contents) {
+ this.fakeContents = contents;
+ let self = this;
+
+ // Save away the unmocked versions of the functions we replace here for tests
+ // that really want the originals. As this may be called many times per test,
+ // we must be careful to not replace them with ones we previously replaced.
+ // (And why are we bothering with these mocks in the first place? Is the
+ // performance of the filesystem *really* such that it outweighs the downside
+ // of not running our real JSON functions in the tests? Eg, these mocks don't
+ // always throw exceptions when the real ones do. Anyway...)
+ for (let name of ["jsonSave", "jsonLoad", "jsonMove", "jsonRemove"]) {
+ let origName = "_real_" + name;
+ if (!Utils[origName]) {
+ Utils[origName] = Utils[name];
+ }
+ }
+
+ Utils.jsonSave = async function jsonSave(filePath, that, obj) {
+ let json = typeof obj == "function" ? obj.call(that) : obj;
+ self.fakeContents["weave/" + filePath + ".json"] = JSON.stringify(json);
+ };
+
+ Utils.jsonLoad = async function jsonLoad(filePath, that) {
+ let obj;
+ let json = self.fakeContents["weave/" + filePath + ".json"];
+ if (json) {
+ obj = JSON.parse(json);
+ }
+ return obj;
+ };
+
+ Utils.jsonMove = function jsonMove(aFrom, aTo, that) {
+ const fromPath = "weave/" + aFrom + ".json";
+ self.fakeContents["weave/" + aTo + ".json"] = self.fakeContents[fromPath];
+ delete self.fakeContents[fromPath];
+ return Promise.resolve();
+ };
+
+ Utils.jsonRemove = function jsonRemove(filePath, that) {
+ delete self.fakeContents["weave/" + filePath + ".json"];
+ return Promise.resolve();
+ };
+}
+
+export function fakeSHA256HMAC(message) {
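+  // Truncate, then pad, to exactly 64 characters - a deterministic stand-in
+  // for a real HMAC.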
+ message = message.substr(0, 64);
+ while (message.length < 64) {
+ message += " ";
+ }
+ return message;
+}
+
+export function FakeGUIDService() {
+ let latestGUID = 0;
+
+ Utils.makeGUID = function makeGUID() {
+ // ensure that this always returns a unique 12 character string
+ let nextGUID = "fake-guid-" + String(latestGUID++).padStart(2, "0");
+ return nextGUID.slice(nextGUID.length - 12, nextGUID.length);
+ };
+}
+
+/*
+ * Mock implementation of WeaveCrypto. It does not encrypt or
+ * decrypt, merely returning the input verbatim.
+ */
+export function FakeCryptoService() {
+ this.counter = 0;
+
+ delete Weave.Crypto; // get rid of the getter first
+ Weave.Crypto = this;
+
+ RawCryptoWrapper.prototype.ciphertextHMAC = function ciphertextHMAC(
+ keyBundle
+ ) {
+ return fakeSHA256HMAC(this.ciphertext);
+ };
+}
+
+FakeCryptoService.prototype = {
+ async encrypt(clearText, symmetricKey, iv) {
+ return clearText;
+ },
+
+ async decrypt(cipherText, symmetricKey, iv) {
+ return cipherText;
+ },
+
+ async generateRandomKey() {
+ return btoa("fake-symmetric-key-" + this.counter++);
+ },
+
+ generateRandomIV: function generateRandomIV() {
+ // A base64-encoded IV is 24 characters long
+ return btoa("fake-fake-fake-random-iv");
+ },
+
+ expandData: function expandData(data, len) {
+ return data;
+ },
+
+ generateRandomBytes: function generateRandomBytes(byteCount) {
+ return "not-so-random-now-are-we-HA-HA-HA! >:)".slice(byteCount);
+ },
+};
diff --git a/services/sync/modules-testing/fxa_utils.sys.mjs b/services/sync/modules-testing/fxa_utils.sys.mjs
new file mode 100644
index 0000000000..c953f0eaa3
--- /dev/null
+++ b/services/sync/modules-testing/fxa_utils.sys.mjs
@@ -0,0 +1,55 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Weave } from "resource://services-sync/main.sys.mjs";
+import { SyncAuthManager } from "resource://services-sync/sync_auth.sys.mjs";
+
+import { TokenServerClient } from "resource://services-common/tokenserverclient.sys.mjs";
+import { configureFxAccountIdentity } from "resource://testing-common/services/sync/utils.sys.mjs";
+
+// Create a new sync_auth object and initialize it with a
+// mocked TokenServerClient which always receives the specified response.
+export var initializeIdentityWithTokenServerResponse = function (response) {
+  // First create a mock "request" object that we'll hack into the token server.
+  // A log for it:
+ let requestLog = Log.repository.getLogger("testing.mock-rest");
+ if (!requestLog.appenders.length) {
+ // might as well see what it says :)
+ requestLog.addAppender(new Log.DumpAppender());
+ requestLog.level = Log.Level.Trace;
+ }
+
+ // A mock request object.
+ function MockRESTRequest(url) {}
+ MockRESTRequest.prototype = {
+ _log: requestLog,
+ setHeader() {},
+ async get() {
+ this.response = response;
+ return response;
+ },
+ };
+ // The mocked TokenServer client which will get the response.
+ function MockTSC() {}
+ MockTSC.prototype = new TokenServerClient();
+ MockTSC.prototype.constructor = MockTSC;
+ MockTSC.prototype.newRESTRequest = function (url) {
+ return new MockRESTRequest(url);
+ };
+ // Arrange for the same observerPrefix as sync_auth uses.
+ MockTSC.prototype.observerPrefix = "weave:service";
+
+ // tie it all together.
+ Weave.Status.__authManager = Weave.Service.identity = new SyncAuthManager();
+ let syncAuthManager = Weave.Service.identity;
+ // a sanity check
+ if (!(syncAuthManager instanceof SyncAuthManager)) {
+ throw new Error("sync isn't configured to use sync_auth");
+ }
+ let mockTSC = new MockTSC();
+ configureFxAccountIdentity(syncAuthManager);
+ syncAuthManager._tokenServerClient = mockTSC;
+};
diff --git a/services/sync/modules-testing/rotaryengine.sys.mjs b/services/sync/modules-testing/rotaryengine.sys.mjs
new file mode 100644
index 0000000000..d7f2165e4d
--- /dev/null
+++ b/services/sync/modules-testing/rotaryengine.sys.mjs
@@ -0,0 +1,120 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import {
+ Store,
+ SyncEngine,
+ LegacyTracker,
+} from "resource://services-sync/engines.sys.mjs";
+
+import { CryptoWrapper } from "resource://services-sync/record.sys.mjs";
+import { SerializableSet, Utils } from "resource://services-sync/util.sys.mjs";
+
+/*
+ * A fake engine implementation.
+ * This is used all over the place.
+ *
+ * Complete with record, store, and tracker implementations.
+ */
+
+export function RotaryRecord(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+
+RotaryRecord.prototype = {};
+Object.setPrototypeOf(RotaryRecord.prototype, CryptoWrapper.prototype);
+Utils.deferGetSet(RotaryRecord, "cleartext", ["denomination"]);
+
+export function RotaryStore(name, engine) {
+ Store.call(this, name, engine);
+ this.items = {};
+}
+
+RotaryStore.prototype = {
+ async create(record) {
+ this.items[record.id] = record.denomination;
+ },
+
+ async remove(record) {
+ delete this.items[record.id];
+ },
+
+ async update(record) {
+ this.items[record.id] = record.denomination;
+ },
+
+ async itemExists(id) {
+ return id in this.items;
+ },
+
+ async createRecord(id, collection) {
+ let record = new RotaryRecord(collection, id);
+
+ if (!(id in this.items)) {
+ record.deleted = true;
+ return record;
+ }
+
+ record.denomination = this.items[id] || "Data for new record: " + id;
+ return record;
+ },
+
+ async changeItemID(oldID, newID) {
+ if (oldID in this.items) {
+ this.items[newID] = this.items[oldID];
+ }
+
+ delete this.items[oldID];
+ },
+
+ async getAllIDs() {
+ let ids = {};
+ for (let id in this.items) {
+ ids[id] = true;
+ }
+ return ids;
+ },
+
+ async wipe() {
+ this.items = {};
+ },
+};
+
+Object.setPrototypeOf(RotaryStore.prototype, Store.prototype);
+
+export function RotaryTracker(name, engine) {
+ LegacyTracker.call(this, name, engine);
+}
+
+RotaryTracker.prototype = {};
+Object.setPrototypeOf(RotaryTracker.prototype, LegacyTracker.prototype);
+
+export function RotaryEngine(service) {
+ SyncEngine.call(this, "Rotary", service);
+ // Ensure that the engine starts with a clean slate.
+ this.toFetch = new SerializableSet();
+ this.previousFailed = new SerializableSet();
+}
+
+RotaryEngine.prototype = {
+ _storeObj: RotaryStore,
+ _trackerObj: RotaryTracker,
+ _recordObj: RotaryRecord,
+
+ async _findDupe(item) {
+ // This is a Special Value® used for testing proper reconciling on dupe
+ // detection.
+ if (item.id == "DUPE_INCOMING") {
+ return "DUPE_LOCAL";
+ }
+
+ for (let [id, value] of Object.entries(this._store.items)) {
+ if (item.denomination == value) {
+ return id;
+ }
+ }
+ return null;
+ },
+};
+Object.setPrototypeOf(RotaryEngine.prototype, SyncEngine.prototype);
diff --git a/services/sync/modules-testing/utils.sys.mjs b/services/sync/modules-testing/utils.sys.mjs
new file mode 100644
index 0000000000..498bf9872a
--- /dev/null
+++ b/services/sync/modules-testing/utils.sys.mjs
@@ -0,0 +1,319 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+
+import { Assert } from "resource://testing-common/Assert.sys.mjs";
+
+import { initTestLogging } from "resource://testing-common/services/common/logging.sys.mjs";
+import {
+ FakeCryptoService,
+ FakeFilesystemService,
+ FakeGUIDService,
+ fakeSHA256HMAC,
+} from "resource://testing-common/services/sync/fakeservices.sys.mjs";
+
+import {
+ FxAccounts,
+ AccountState,
+} from "resource://gre/modules/FxAccounts.sys.mjs";
+import { FxAccountsClient } from "resource://gre/modules/FxAccountsClient.sys.mjs";
+
+import { SCOPE_OLD_SYNC } from "resource://gre/modules/FxAccountsCommon.sys.mjs";
+
+// A mock "storage manager" for FxAccounts that doesn't actually write anywhere.
+export function MockFxaStorageManager() {}
+
+MockFxaStorageManager.prototype = {
+ promiseInitialized: Promise.resolve(),
+
+ initialize(accountData) {
+ this.accountData = accountData;
+ },
+
+ finalize() {
+ return Promise.resolve();
+ },
+
+ getAccountData(fields = null) {
+ let result;
+ if (!this.accountData) {
+ result = null;
+ } else if (fields == null) {
+ // can't use cloneInto as the keys get upset...
+ result = {};
+ for (let field of Object.keys(this.accountData)) {
+ result[field] = this.accountData[field];
+ }
+ } else {
+ if (!Array.isArray(fields)) {
+ fields = [fields];
+ }
+ result = {};
+ for (let field of fields) {
+ result[field] = this.accountData[field];
+ }
+ }
+ return Promise.resolve(result);
+ },
+
+ updateAccountData(updatedFields) {
+ for (let [name, value] of Object.entries(updatedFields)) {
+ if (value == null) {
+ delete this.accountData[name];
+ } else {
+ this.accountData[name] = value;
+ }
+ }
+ return Promise.resolve();
+ },
+
+ deleteAccountData() {
+ this.accountData = null;
+ return Promise.resolve();
+ },
+};
+
+/**
+ * First wait >100ms (nsITimers can take up to that much time to fire, so
+ * we can account for the timer in delayedAutoconnect) and then two event
+ * loop ticks (to account for the CommonUtils.nextTick() in autoConnect).
+ */
+export function waitForZeroTimer(callback) {
+ let ticks = 2;
+ function wait() {
+ if (ticks) {
+ ticks -= 1;
+ CommonUtils.nextTick(wait);
+ return;
+ }
+ callback();
+ }
+ CommonUtils.namedTimer(wait, 150, {}, "timer");
+}
+
+export var promiseZeroTimer = function () {
+ return new Promise(resolve => {
+ waitForZeroTimer(resolve);
+ });
+};
+
+export var promiseNamedTimer = function (wait, thisObj, name) {
+ return new Promise(resolve => {
+ CommonUtils.namedTimer(resolve, wait, thisObj, name);
+ });
+};
+
+// Return an identity configuration suitable for testing with our identity
+// providers. |overrides| can specify overrides for any default values.
+// (Note that it's configureIdentity, below, which accepts an optional
+// |server| and uses it to form the cluster URL for the FxA identity.)
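+//
+// For example (illustrative):
+//   let config = makeIdentityConfig({ username: "johndoe" });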
+export var makeIdentityConfig = function (overrides) {
+ // first setup the defaults.
+ let result = {
+ // Username used in both fxaccount and sync identity configs.
+ username: "foo",
+ // fxaccount specific credentials.
+ fxaccount: {
+ user: {
+ email: "foo",
+ scopedKeys: {
+ [SCOPE_OLD_SYNC]: {
+ kid: "1234567890123-u7u7u7u7u7u7u7u7u7u7uw",
+ k: "qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqg",
+ kty: "oct",
+ },
+ },
+ sessionToken: "sessionToken",
+ uid: "a".repeat(32),
+ verified: true,
+ },
+ token: {
+ endpoint: null,
+ duration: 300,
+ id: "id",
+ key: "key",
+ hashed_fxa_uid: "f".repeat(32), // used during telemetry validation
+ // uid will be set to the username.
+ },
+ },
+ };
+
+ // Now handle any specified overrides.
+ if (overrides) {
+ if (overrides.username) {
+ result.username = overrides.username;
+ }
+ if (overrides.fxaccount) {
+ // TODO: allow just some attributes to be specified
+ result.fxaccount = overrides.fxaccount;
+ }
+ if (overrides.node_type) {
+ result.fxaccount.token.node_type = overrides.node_type;
+ }
+ }
+ return result;
+};
+
+export var makeFxAccountsInternalMock = function (config) {
+ return {
+ newAccountState(credentials) {
+ // We only expect this to be called with null indicating the (mock)
+ // storage should be read.
+ if (credentials) {
+ throw new Error("Not expecting to have credentials passed");
+ }
+ let storageManager = new MockFxaStorageManager();
+ storageManager.initialize(config.fxaccount.user);
+ let accountState = new AccountState(storageManager);
+ return accountState;
+ },
+ getOAuthToken: () => Promise.resolve("some-access-token"),
+ destroyOAuthToken: () => Promise.resolve(),
+ keys: {
+ getScopedKeys: () =>
+ Promise.resolve({
+ "https://identity.mozilla.com/apps/oldsync": {
+ identifier: "https://identity.mozilla.com/apps/oldsync",
+ keyRotationSecret:
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ keyRotationTimestamp: 1510726317123,
+ },
+ }),
+ },
+ profile: {
+ getProfile() {
+ return null;
+ },
+ },
+ };
+};
+
+// Configure an instance of an FxAccount identity provider with the specified
+// config (or the default config if not specified).
+export var configureFxAccountIdentity = function (
+ authService,
+ config = makeIdentityConfig(),
+ fxaInternal = makeFxAccountsInternalMock(config)
+) {
+ // until we get better test infrastructure for bid_identity, we set the
+  // signed-in user's "email" to the username, simply as many tests rely on this.
+ config.fxaccount.user.email = config.username;
+
+ let fxa = new FxAccounts(fxaInternal);
+
+ let MockFxAccountsClient = function () {
+ FxAccountsClient.apply(this);
+ };
+ MockFxAccountsClient.prototype = {
+ accountStatus() {
+ return Promise.resolve(true);
+ },
+ };
+ Object.setPrototypeOf(
+ MockFxAccountsClient.prototype,
+ FxAccountsClient.prototype
+ );
+ let mockFxAClient = new MockFxAccountsClient();
+ fxa._internal._fxAccountsClient = mockFxAClient;
+
+ let mockTSC = {
+ // TokenServerClient
+ async getTokenUsingOAuth(url, oauthToken) {
+ Assert.equal(
+ url,
+ Services.prefs.getStringPref("identity.sync.tokenserver.uri")
+ );
+ Assert.ok(oauthToken, "oauth token present");
+ config.fxaccount.token.uid = config.username;
+ return config.fxaccount.token;
+ },
+ };
+ authService._fxaService = fxa;
+ authService._tokenServerClient = mockTSC;
+ // Set the "account" of the sync auth manager to be the "email" of the
+ // logged in user of the mockFXA service.
+ authService._signedInUser = config.fxaccount.user;
+ authService._account = config.fxaccount.user.email;
+};
+
+export var configureIdentity = async function (identityOverrides, server) {
+ let config = makeIdentityConfig(identityOverrides, server);
+ // Must be imported after the identity configuration is set up.
+ let { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+ );
+
+ // If a server was specified, ensure FxA has a correct cluster URL available.
+ if (server && !config.fxaccount.token.endpoint) {
+ let ep = server.baseURI;
+ if (!ep.endsWith("/")) {
+ ep += "/";
+ }
+ ep += "1.1/" + config.username + "/";
+ config.fxaccount.token.endpoint = ep;
+ }
+
+ configureFxAccountIdentity(Service.identity, config);
+ Services.prefs.setStringPref("services.sync.username", config.username);
+  // many of these tests assume all the auth stuff is set up and don't hit
+ // a path which causes that auth to magically happen - so do it now.
+ await Service.identity._ensureValidToken();
+
+ // and cheat to avoid requiring each test do an explicit login - give it
+ // a cluster URL.
+ if (config.fxaccount.token.endpoint) {
+ Service.clusterURL = config.fxaccount.token.endpoint;
+ }
+};
+
+export function syncTestLogging(level = "Trace") {
+ let logStats = initTestLogging(level);
+ Services.prefs.setStringPref("services.sync.log.logger", level);
+ Services.prefs.setStringPref("services.sync.log.logger.engine", "");
+ return logStats;
+}
+
+export var SyncTestingInfrastructure = async function (server, username) {
+ let config = makeIdentityConfig({ username });
+ await configureIdentity(config, server);
+ return {
+ logStats: syncTestLogging(),
+ fakeFilesystem: new FakeFilesystemService({}),
+ fakeGUIDService: new FakeGUIDService(),
+ fakeCryptoService: new FakeCryptoService(),
+ };
+};
+
+/**
+ * Turn WBO cleartext into fake "encrypted" payload as it goes over the wire.
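+ *
+ * For example (illustrative record shape):
+ *   encryptPayload({ id: "rec1", denomination: "some value" })
+ * returns { ciphertext, IV, hmac }, where the ciphertext is just the
+ * JSON-stringified cleartext.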
+ */
+export function encryptPayload(cleartext) {
+ if (typeof cleartext == "object") {
+ cleartext = JSON.stringify(cleartext);
+ }
+
+ return {
+ ciphertext: cleartext, // ciphertext == cleartext with fake crypto
+ IV: "irrelevant",
+ hmac: fakeSHA256HMAC(cleartext),
+ };
+}
+
+export var sumHistogram = function (name, options = {}) {
+ let histogram = options.key
+ ? Services.telemetry.getKeyedHistogramById(name)
+ : Services.telemetry.getHistogramById(name);
+ let snapshot = histogram.snapshot();
+ let sum = -Infinity;
+ if (snapshot) {
+ if (options.key && snapshot[options.key]) {
+ sum = snapshot[options.key].sum;
+ } else {
+ sum = snapshot.sum;
+ }
+ }
+ histogram.clear();
+ return sum;
+};
diff --git a/services/sync/modules/SyncDisconnect.sys.mjs b/services/sync/modules/SyncDisconnect.sys.mjs
new file mode 100644
index 0000000000..2206a462ac
--- /dev/null
+++ b/services/sync/modules/SyncDisconnect.sys.mjs
@@ -0,0 +1,235 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// This module provides a facility for disconnecting Sync and FxA, optionally
+// sanitizing profile data as part of the process.
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ AsyncShutdown: "resource://gre/modules/AsyncShutdown.sys.mjs",
+ Log: "resource://gre/modules/Log.sys.mjs",
+ PREF_LAST_FXA_USER: "resource://gre/modules/FxAccountsCommon.sys.mjs",
+ Sanitizer: "resource:///modules/Sanitizer.sys.mjs",
+ Utils: "resource://services-sync/util.sys.mjs",
+ setTimeout: "resource://gre/modules/Timer.sys.mjs",
+});
+
+ChromeUtils.defineLazyGetter(lazy, "fxAccounts", () => {
+ return ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+ ).getFxAccountsSingleton();
+});
+
+export const SyncDisconnectInternal = {
+  lockRetryInterval: 1000, // wait 1 second before trying for the lock again.
+ lockRetryCount: 120, // Try 120 times (==2 mins) before giving up in disgust.
+ promiseDisconnectFinished: null, // If we are sanitizing, a promise for completion.
+
+ // mocked by tests.
+ getWeave() {
+ return ChromeUtils.importESModule("resource://services-sync/main.sys.mjs")
+ .Weave;
+ },
+
+ // Returns a promise that resolves when we are not syncing, waiting until
+ // a current Sync completes if necessary. Resolves with true if we
+ // successfully waited, in which case the sync lock will have been taken to
+  // ensure future syncs don't start, or resolves with false if we gave up
+ // waiting for the sync to complete (in which case we didn't take a lock -
+ // but note that Sync probably remains locked in this case regardless.)
+ async promiseNotSyncing(abortController) {
+ let weave = this.getWeave();
+ let log = lazy.Log.repository.getLogger("Sync.Service");
+ // We might be syncing - poll for up to 2 minutes waiting for the lock.
+ // (2 minutes seems extreme, but should be very rare.)
+ return new Promise(resolve => {
+ abortController.signal.onabort = () => {
+ resolve(false);
+ };
+
+ let attempts = 0;
+ let checkLock = () => {
+ if (abortController.signal.aborted) {
+ // We've already resolved, so don't want a new timer to ever start.
+ return;
+ }
+ if (weave.Service.lock()) {
+ resolve(true);
+ return;
+ }
+ attempts += 1;
+ if (attempts >= this.lockRetryCount) {
+ log.error(
+ "Gave up waiting for the sync lock - going ahead with sanitize anyway"
+ );
+ resolve(false);
+ return;
+ }
+ log.debug("Waiting a couple of seconds to get the sync lock");
+ lazy.setTimeout(checkLock, this.lockRetryInterval);
+ };
+ checkLock();
+ });
+ },
+
+ // Sanitize Sync-related data.
+ async doSanitizeSyncData() {
+ let weave = this.getWeave();
+ // Get the sync logger - if stuff goes wrong it can be useful to have that
+ // recorded in the sync logs.
+ let log = lazy.Log.repository.getLogger("Sync.Service");
+ log.info("Starting santitize of Sync data");
+ try {
+ // We clobber data for all Sync engines that are enabled.
+ await weave.Service.promiseInitialized;
+ weave.Service.enabled = false;
+
+ log.info("starting actual sanitization");
+ for (let engine of weave.Service.engineManager.getAll()) {
+ if (engine.enabled) {
+ try {
+ log.info("Wiping engine", engine.name);
+ await engine.wipeClient();
+ } catch (ex) {
+ log.error("Failed to wipe engine", ex);
+ }
+ }
+ }
+ // Reset the pref which is used to show a warning when a different user
+ // signs in - this is no longer a concern now that we've removed the
+ // data from the profile.
+ Services.prefs.clearUserPref(lazy.PREF_LAST_FXA_USER);
+
+ log.info("Finished wiping sync data");
+ } catch (ex) {
+ log.error("Failed to sanitize Sync data", ex);
+ console.error("Failed to sanitize Sync data", ex);
+ }
+ try {
+ // ensure any logs we wrote are flushed to disk.
+ await weave.Service.errorHandler.resetFileLog();
+ } catch (ex) {
+ console.log("Failed to flush the Sync log", ex);
+ }
+ },
+
+ // Sanitize all Browser data.
+ async doSanitizeBrowserData() {
+ try {
+ // sanitize everything other than "open windows" (and we don't do that
+ // because it may confuse the user - they probably want to see
+      // about:prefs with the disconnection reflected).
+ let itemsToClear = Object.keys(lazy.Sanitizer.items).filter(
+ k => k != "openWindows"
+ );
+ await lazy.Sanitizer.sanitize(itemsToClear);
+ } catch (ex) {
+ console.error("Failed to sanitize other data", ex);
+ }
+ },
+
+ async doSyncAndAccountDisconnect(shouldUnlock) {
+ // We do a startOver of Sync first - if we do the account first we end
+ // up with Sync configured but FxA not configured, which causes the browser
+ // UI to briefly enter a "needs reauth" state.
+ let Weave = this.getWeave();
+ await Weave.Service.promiseInitialized;
+ await Weave.Service.startOver();
+ await lazy.fxAccounts.signOut();
+    // Sync may have been disabled if we sanitized, so re-enable it now or
+ // else the user will be unable to resync should they sign in before a
+ // restart.
+ Weave.Service.enabled = true;
+
+ // and finally, if we managed to get the lock before, we should unlock it
+ // now.
+ if (shouldUnlock) {
+ Weave.Service.unlock();
+ }
+ },
+
+ // Start the sanitization process. Returns a promise that resolves when
+ // the sanitize is complete, and an AbortController which can be used to
+ // abort the process of waiting for a sync to complete.
+ async _startDisconnect(abortController, sanitizeData = false) {
+ // This is a bit convoluted - we want to wait for a sync to finish before
+ // sanitizing, but want to abort that wait if the browser shuts down while
+ // we are waiting (in which case we'll charge ahead anyway).
+ // So we do this by using an AbortController and passing that to the
+ // function that waits for the sync lock - it will immediately resolve
+ // if the abort controller is aborted.
+ let log = lazy.Log.repository.getLogger("Sync.Service");
+
+ // If the master-password is locked then we will fail to fully sanitize,
+ // so prompt for that now. If canceled, we just abort now.
+ log.info("checking master-password state");
+ if (!lazy.Utils.ensureMPUnlocked()) {
+ log.warn(
+ "The master-password needs to be unlocked to fully disconnect from sync"
+ );
+ return;
+ }
+
+ log.info("waiting for any existing syncs to complete");
+ let locked = await this.promiseNotSyncing(abortController);
+
+ if (sanitizeData) {
+ await this.doSanitizeSyncData();
+
+ // We disconnect before sanitizing the browser data - in a worst-case
+ // scenario where the sanitize takes so long that even the shutdown
+ // blocker doesn't allow it to finish, we should still at least be in
+ // a disconnected state on the next startup.
+ log.info("disconnecting account");
+ await this.doSyncAndAccountDisconnect(locked);
+
+ await this.doSanitizeBrowserData();
+ } else {
+ log.info("disconnecting account");
+ await this.doSyncAndAccountDisconnect(locked);
+ }
+ },
+
+ async disconnect(sanitizeData) {
+ if (this.promiseDisconnectFinished) {
+ throw new Error("A disconnect is already in progress");
+ }
+ let abortController = new AbortController();
+ let promiseDisconnectFinished = this._startDisconnect(
+ abortController,
+ sanitizeData
+ );
+ this.promiseDisconnectFinished = promiseDisconnectFinished;
+ let shutdownBlocker = () => {
+ // oh dear - we are sanitizing (probably stuck waiting for a sync to
+ // complete) and the browser is shutting down. Let's avoid the wait
+ // for sync to complete and continue the process anyway.
+ abortController.abort();
+ return promiseDisconnectFinished;
+ };
+ lazy.AsyncShutdown.quitApplicationGranted.addBlocker(
+ "SyncDisconnect: removing requested data",
+ shutdownBlocker
+ );
+
+ // wait for it to finish - hopefully without the blocker being called.
+ await promiseDisconnectFinished;
+ this.promiseDisconnectFinished = null;
+
+  // The disconnect finished, so remove our blocker - this is a no-op if the
+  // blocker already ran.
+ lazy.AsyncShutdown.quitApplicationGranted.removeBlocker(shutdownBlocker);
+ },
+};
+
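+// Example (illustrative): disconnect from Sync and FxA, sanitizing synced
+// data as part of the process:
+//   await SyncDisconnect.disconnect(true);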
+export const SyncDisconnect = {
+ get promiseDisconnectFinished() {
+ return SyncDisconnectInternal.promiseDisconnectFinished;
+ },
+
+ disconnect(sanitizeData) {
+ return SyncDisconnectInternal.disconnect(sanitizeData);
+ },
+};
diff --git a/services/sync/modules/SyncedTabs.sys.mjs b/services/sync/modules/SyncedTabs.sys.mjs
new file mode 100644
index 0000000000..410244413e
--- /dev/null
+++ b/services/sync/modules/SyncedTabs.sys.mjs
@@ -0,0 +1,348 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ CLIENT_NOT_CONFIGURED: "resource://services-sync/constants.sys.mjs",
+ Weave: "resource://services-sync/main.sys.mjs",
+});
+
+// The Sync XPCOM service
+ChromeUtils.defineLazyGetter(lazy, "weaveXPCService", function () {
+ return Cc["@mozilla.org/weave/service;1"].getService(
+ Ci.nsISupports
+ ).wrappedJSObject;
+});
+
+// from MDN...
+function escapeRegExp(string) {
+ return string.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
+}
+
+// A topic we fire whenever we have new tabs available. This might be due
+// to a request made by this module to refresh the tab list, or as the result
+// of a regularly scheduled sync. The intent is that consumers just listen
+// for this notification and update their UI in response.
+const TOPIC_TABS_CHANGED = "services.sync.tabs.changed";
+
+// The interval, in seconds, within which we consider the existing list
+// of tabs "fresh enough" and don't force a new sync.
+const TABS_FRESH_ENOUGH_INTERVAL_SECONDS = 30;
+
+ChromeUtils.defineLazyGetter(lazy, "log", () => {
+ const { Log } = ChromeUtils.importESModule(
+ "resource://gre/modules/Log.sys.mjs"
+ );
+ let log = Log.repository.getLogger("Sync.RemoteTabs");
+ log.manageLevelFromPref("services.sync.log.logger.tabs");
+ return log;
+});
+
+// A private singleton that does the work.
+let SyncedTabsInternal = {
+ /* Make a "tab" record. Returns a promise */
+ async _makeTab(client, tab, url, showRemoteIcons) {
+ let icon;
+ if (showRemoteIcons) {
+ icon = tab.icon;
+ }
+ if (!icon) {
+ // By not specifying a size the favicon service will pick the default,
+ // that is usually set through setDefaultIconURIPreferredSize by the
+ // first browser window. Commonly it's 16px at current dpi.
+ icon = "page-icon:" + url;
+ }
+ return {
+ type: "tab",
+ title: tab.title || url,
+ url,
+ icon,
+ client: client.id,
+ lastUsed: tab.lastUsed,
+ inactive: tab.inactive,
+ };
+ },
+
+ /* Make a "client" record. Returns a promise for consistency with _makeTab */
+ async _makeClient(client) {
+ return {
+ id: client.id,
+ type: "client",
+ name: lazy.Weave.Service.clientsEngine.getClientName(client.id),
+ clientType: lazy.Weave.Service.clientsEngine.getClientType(client.id),
+ lastModified: client.lastModified * 1000, // sec to ms
+ tabs: [],
+ };
+ },
+
+ _tabMatchesFilter(tab, filter) {
+ let reFilter = new RegExp(escapeRegExp(filter), "i");
+ return reFilter.test(tab.url) || reFilter.test(tab.title);
+ },
+
+ _createRecentTabsList(
+ clients,
+ maxCount,
+ extraParams = { removeAllDupes: true, removeDeviceDupes: false }
+ ) {
+ let tabs = [];
+
+ for (let client of clients) {
+ if (extraParams.removeDeviceDupes) {
+ client.tabs = this._filterRecentTabsDupes(client.tabs);
+ }
+ for (let tab of client.tabs) {
+ tab.device = client.name;
+ tab.deviceType = client.clientType;
+ }
+ tabs = [...tabs, ...client.tabs.reverse()];
+ }
+ if (extraParams.removeAllDupes) {
+ tabs = this._filterRecentTabsDupes(tabs);
+ }
+ tabs = tabs.sort((a, b) => b.lastUsed - a.lastUsed).slice(0, maxCount);
+ return tabs;
+ },
+
+ _filterRecentTabsDupes(tabs) {
+ // Filter out any tabs with duplicate URLs preserving
+ // the duplicate with the most recent lastUsed value
+ return tabs.filter(tab => {
+ return !tabs.some(t => {
+ return t.url === tab.url && tab.lastUsed < t.lastUsed;
+ });
+ });
+ },
+
+ async getTabClients(filter) {
+ lazy.log.info("Generating tab list with filter", filter);
+ let result = [];
+
+ // If Sync isn't ready, don't try and get anything.
+ if (!lazy.weaveXPCService.ready) {
+ lazy.log.debug("Sync isn't yet ready, so returning an empty tab list");
+ return result;
+ }
+
+ // A boolean that controls whether we should show the icon from the remote tab.
+ const showRemoteIcons = Services.prefs.getBoolPref(
+ "services.sync.syncedTabs.showRemoteIcons",
+ true
+ );
+
+ let engine = lazy.Weave.Service.engineManager.get("tabs");
+
+ let ntabs = 0;
+ let clientTabList = await engine.getAllClients();
+ for (let client of clientTabList) {
+ if (!lazy.Weave.Service.clientsEngine.remoteClientExists(client.id)) {
+ continue;
+ }
+ let clientRepr = await this._makeClient(client);
+ lazy.log.debug("Processing client", clientRepr);
+
+ for (let tab of client.tabs) {
+ let url = tab.urlHistory[0];
+ lazy.log.trace("remote tab", url);
+
+ if (!url) {
+ continue;
+ }
+ let tabRepr = await this._makeTab(client, tab, url, showRemoteIcons);
+ if (filter && !this._tabMatchesFilter(tabRepr, filter)) {
+ continue;
+ }
+ clientRepr.tabs.push(tabRepr);
+ }
+ // We return all clients, even those without tabs - the consumer should
+ // filter it if they care.
+ ntabs += clientRepr.tabs.length;
+ result.push(clientRepr);
+ }
+ lazy.log.info(
+ `Final tab list has ${result.length} clients with ${ntabs} tabs.`
+ );
+ return result;
+ },
+
+ async syncTabs(force) {
+ if (!force) {
+ // Don't bother refetching tabs if we already did so recently
+ let lastFetch = Services.prefs.getIntPref(
+ "services.sync.lastTabFetch",
+ 0
+ );
+ let now = Math.floor(Date.now() / 1000);
+ if (now - lastFetch < TABS_FRESH_ENOUGH_INTERVAL_SECONDS) {
+ lazy.log.info("_refetchTabs was done recently, do not doing it again");
+ return false;
+ }
+ }
+
+ // If Sync isn't configured don't try and sync, else we will get reports
+ // of a login failure.
+ if (lazy.Weave.Status.checkSetup() === lazy.CLIENT_NOT_CONFIGURED) {
+ lazy.log.info(
+ "Sync client is not configured, so not attempting a tab sync"
+ );
+ return false;
+ }
+ // If the primary pass is locked, we should not try to sync
+ if (lazy.Weave.Utils.mpLocked()) {
+ lazy.log.info(
+ "Can't sync tabs due to the primary password being locked",
+ lazy.Weave.Status.login
+ );
+ return false;
+ }
+ // Ask Sync to just do the tabs engine if it can.
+ try {
+ lazy.log.info("Doing a tab sync.");
+ await lazy.Weave.Service.sync({ why: "tabs", engines: ["tabs"] });
+ return true;
+ } catch (ex) {
+ lazy.log.error("Sync failed", ex);
+ throw ex;
+ }
+ },
+
+ observe(subject, topic, data) {
+ lazy.log.trace(`observed topic=${topic}, data=${data}, subject=${subject}`);
+ switch (topic) {
+ case "weave:engine:sync:finish":
+ if (data != "tabs") {
+ return;
+ }
+ // The tabs engine just finished syncing
+ // Set our lastTabFetch pref here so it tracks both explicit sync calls
+ // and normally scheduled ones.
+ Services.prefs.setIntPref(
+ "services.sync.lastTabFetch",
+ Math.floor(Date.now() / 1000)
+ );
+ Services.obs.notifyObservers(null, TOPIC_TABS_CHANGED);
+ break;
+ case "weave:service:start-over":
+ // start-over needs to notify so consumers find no tabs.
+ Services.prefs.clearUserPref("services.sync.lastTabFetch");
+ Services.obs.notifyObservers(null, TOPIC_TABS_CHANGED);
+ break;
+ case "nsPref:changed":
+ Services.obs.notifyObservers(null, TOPIC_TABS_CHANGED);
+ break;
+ default:
+ break;
+ }
+ },
+
+ // Returns true if Sync is configured to Sync tabs, false otherwise
+ get isConfiguredToSyncTabs() {
+ if (!lazy.weaveXPCService.ready) {
+ lazy.log.debug("Sync isn't yet ready; assuming tab engine is enabled");
+ return true;
+ }
+
+ let engine = lazy.Weave.Service.engineManager.get("tabs");
+ return engine && engine.enabled;
+ },
+
+ get hasSyncedThisSession() {
+ let engine = lazy.Weave.Service.engineManager.get("tabs");
+ return engine && engine.hasSyncedThisSession;
+ },
+};
+
+Services.obs.addObserver(SyncedTabsInternal, "weave:engine:sync:finish");
+Services.obs.addObserver(SyncedTabsInternal, "weave:service:start-over");
+// Observe the pref that indicates the state of the tabs engine. Changes to
+// it will force consumers to re-evaluate the state of sync and update
+// accordingly.
+Services.prefs.addObserver("services.sync.engine.tabs", SyncedTabsInternal);
+
+// The public interface.
+export var SyncedTabs = {
+ // A mock-point for tests.
+ _internal: SyncedTabsInternal,
+
+ // We make the topic for the observer notification public.
+ TOPIC_TABS_CHANGED,
+
+ // Expose the interval used to determine if synced tabs data needs a new sync
+ TABS_FRESH_ENOUGH_INTERVAL_SECONDS,
+
+ // Returns true if Sync is configured to Sync tabs, false otherwise
+ get isConfiguredToSyncTabs() {
+ return this._internal.isConfiguredToSyncTabs;
+ },
+
+ // Returns true if a tab sync has completed once this session. If this
+ // returns false, then getting back no clients/tabs possibly just means we
+ // are waiting for that first sync to complete.
+ get hasSyncedThisSession() {
+ return this._internal.hasSyncedThisSession;
+ },
+
+ // Return a promise that resolves with an array of client records, each with
+ // a .tabs array. Note that part of the contract for this module is that the
+ // returned objects are not shared between invocations, so callers are free
+ // to mutate the returned objects (eg, sort, truncate) however they see fit.
+ getTabClients(query) {
+ return this._internal.getTabClients(query);
+ },
+
+ // Starts a background request to start syncing tabs. Returns a promise that
+ // resolves when the sync is complete, but there's no resolved value -
+ // callers should be listening for TOPIC_TABS_CHANGED.
+ // If |force| is true we always sync. If false, we only sync if the most
+  // recent sync wasn't recent enough.
+ syncTabs(force) {
+ return this._internal.syncTabs(force);
+ },
+
+ createRecentTabsList(clients, maxCount, extraParams) {
+ return this._internal._createRecentTabsList(clients, maxCount, extraParams);
+ },
+
+ sortTabClientsByLastUsed(clients) {
+ // First sort the list of tabs for each client. Note that
+ // this module promises that the objects it returns are never
+ // shared, so we are free to mutate those objects directly.
+ for (let client of clients) {
+ let tabs = client.tabs;
+ tabs.sort((a, b) => b.lastUsed - a.lastUsed);
+ }
+ // Now sort the clients - the clients are sorted in the order of the
+ // most recent tab for that client (ie, it is important the tabs for
+ // each client are already sorted.)
+ clients.sort((a, b) => {
+ if (!a.tabs.length) {
+ return 1; // b comes first.
+ }
+ if (!b.tabs.length) {
+ return -1; // a comes first.
+ }
+ return b.tabs[0].lastUsed - a.tabs[0].lastUsed;
+ });
+ },
+
+ recordSyncedTabsTelemetry(object, tabEvent, extraOptions) {
+ Services.telemetry.setEventRecordingEnabled("synced_tabs", true);
+ Services.telemetry.recordEvent(
+ "synced_tabs",
+ tabEvent,
+ object,
+ null,
+ extraOptions
+ );
+ },
+
+ // Get list of synced tabs across all devices/clients
+ // truncated by value of maxCount param, sorted by
+ // lastUsed value, and filtered for duplicate URLs
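+  // For example (illustrative):
+  //   let tabs = await SyncedTabs.getRecentTabs(5, { removeAllDupes: true });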
+ async getRecentTabs(maxCount, extraParams) {
+ let clients = await this.getTabClients();
+ return this._internal._createRecentTabsList(clients, maxCount, extraParams);
+ },
+};
diff --git a/services/sync/modules/UIState.sys.mjs b/services/sync/modules/UIState.sys.mjs
new file mode 100644
index 0000000000..8981d81f7d
--- /dev/null
+++ b/services/sync/modules/UIState.sys.mjs
@@ -0,0 +1,285 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * @typedef {Object} UIState
+ * @property {string} status The Sync/FxA status, see STATUS_* constants.
+ * @property {string} [email] The FxA email configured to log-in with Sync.
+ * @property {string} [displayName] The user's FxA display name.
+ * @property {string} [avatarURL] The user's FxA avatar URL.
+ * @property {Date} [lastSync] The last sync time.
+ * @property {boolean} [syncing] Whether or not we are currently syncing.
+ */
+
+const lazy = {};
+ChromeUtils.defineESModuleGetters(lazy, {
+ LOGIN_FAILED_LOGIN_REJECTED: "resource://services-sync/constants.sys.mjs",
+ Weave: "resource://services-sync/main.sys.mjs",
+});
+
+const TOPICS = [
+ "weave:connected",
+ "weave:service:login:got-hashed-id",
+ "weave:service:login:error",
+ "weave:service:ready",
+ "weave:service:sync:start",
+ "weave:service:sync:finish",
+ "weave:service:sync:error",
+ "weave:service:start-over:finish",
+ "fxaccounts:onverified",
+ "fxaccounts:onlogin", // Defined in FxAccountsCommon, pulling it is expensive.
+ "fxaccounts:onlogout",
+ "fxaccounts:profilechange",
+ "fxaccounts:statechange",
+];
+
+const ON_UPDATE = "sync-ui-state:update";
+
+const STATUS_NOT_CONFIGURED = "not_configured";
+const STATUS_LOGIN_FAILED = "login_failed";
+const STATUS_NOT_VERIFIED = "not_verified";
+const STATUS_SIGNED_IN = "signed_in";
+
+const DEFAULT_STATE = {
+ status: STATUS_NOT_CONFIGURED,
+};
+
+const UIStateInternal = {
+ _initialized: false,
+ _state: null,
+
+ // We keep _syncing out of the state object because we can only track it
+ // using sync events and we can't determine it at any point in time.
+ _syncing: false,
+
+ get state() {
+ if (!this._state) {
+ return DEFAULT_STATE;
+ }
+ return Object.assign({}, this._state, { syncing: this._syncing });
+ },
+
+ isReady() {
+ if (!this._initialized) {
+ this.init();
+ return false;
+ }
+ return true;
+ },
+
+ init() {
+ this._initialized = true;
+ // Because the FxA toolbar is usually visible, this module gets loaded at
+ // browser startup, and we want to avoid pulling in all of FxA or Sync at
+ // that time, so we refresh the state after the browser has settled.
+ Services.tm.idleDispatchToMainThread(() => {
+ this.refreshState().catch(e => {
+ console.error(e);
+ });
+ }, 2000);
+ },
+
+ // Used for testing.
+ reset() {
+ this._state = null;
+ this._syncing = false;
+ this._initialized = false;
+ },
+
+ observe(subject, topic, data) {
+ switch (topic) {
+ case "weave:service:sync:start":
+ this.toggleSyncActivity(true);
+ break;
+ case "weave:service:sync:finish":
+ case "weave:service:sync:error":
+ this.toggleSyncActivity(false);
+ break;
+ default:
+ this.refreshState().catch(e => {
+ console.error(e);
+ });
+ break;
+ }
+ },
+
+ // Builds a new state from scratch.
+ async refreshState() {
+ const newState = {};
+ await this._refreshFxAState(newState);
+ // Optimize the "not signed in" case to avoid refreshing twice just after
+ // startup - if there's currently no _state, and we still aren't configured,
+ // just early exit.
+ if (this._state == null && newState.status == DEFAULT_STATE.status) {
+ return this.state;
+ }
+ if (newState.syncEnabled) {
+ this._setLastSyncTime(newState); // We want this in case we change accounts.
+ }
+ this._state = newState;
+
+ this.notifyStateUpdated();
+ return this.state;
+ },
+
+ // Update the current state with the last sync time/currently syncing status.
+ toggleSyncActivity(syncing) {
+ this._syncing = syncing;
+ this._setLastSyncTime(this._state);
+
+ this.notifyStateUpdated();
+ },
+
+ notifyStateUpdated() {
+ Services.obs.notifyObservers(null, ON_UPDATE);
+ },
+
+ async _refreshFxAState(newState) {
+ let userData = await this._getUserData();
+ await this._populateWithUserData(newState, userData);
+ },
+
+ async _populateWithUserData(state, userData) {
+ let status;
+ let syncUserName = Services.prefs.getStringPref(
+ "services.sync.username",
+ ""
+ );
+ if (!userData) {
+ // If Sync thinks it is configured but there's no FxA user, then we
+ // want to enter the "login failed" state so the user can get
+ // reconfigured.
+ if (syncUserName) {
+ state.email = syncUserName;
+ status = STATUS_LOGIN_FAILED;
+ } else {
+ // everyone agrees nothing is configured.
+ status = STATUS_NOT_CONFIGURED;
+ }
+ } else {
+ let loginFailed = await this._loginFailed();
+ if (loginFailed) {
+ status = STATUS_LOGIN_FAILED;
+ } else if (!userData.verified) {
+ status = STATUS_NOT_VERIFIED;
+ } else {
+ status = STATUS_SIGNED_IN;
+ }
+ state.uid = userData.uid;
+ state.email = userData.email;
+ state.displayName = userData.displayName;
+      // For better or worse, this module renames these attributes.
+ state.avatarURL = userData.avatar;
+ state.avatarIsDefault = userData.avatarDefault;
+ state.syncEnabled = !!syncUserName;
+ }
+ state.status = status;
+ },
+
+ async _getUserData() {
+ try {
+ return await this.fxAccounts.getSignedInUser();
+ } catch (e) {
+ // This is most likely in tests, where we quickly log users in and out.
+ // The most likely scenario is a user logged out, so reflect that.
+ // Bug 995134 calls for better errors so we could retry if we were
+ // sure this was the failure reason.
+ console.error("Error updating FxA account info:", e);
+ return null;
+ }
+ },
+
+ _setLastSyncTime(state) {
+ if (state?.status == UIState.STATUS_SIGNED_IN) {
+ const lastSync = Services.prefs.getStringPref(
+ "services.sync.lastSync",
+ null
+ );
+ state.lastSync = lastSync ? new Date(lastSync) : null;
+ }
+ },
+
+ async _loginFailed() {
+    // First ask FxA if it thinks the user needs re-authentication. In practice,
+    // this check is probably canonical (i.e., we probably don't need the check
+    // below at all, as we drop local session info on the first sign of a
+    // problem) - but we keep it for now to keep the risk down.
+ let hasLocalSession = await this.fxAccounts.hasLocalSession();
+ if (!hasLocalSession) {
+ return true;
+ }
+
+ // Referencing Weave.Service will implicitly initialize sync, and we don't
+ // want to force that - so first check if it is ready.
+ let service = Cc["@mozilla.org/weave/service;1"].getService(
+ Ci.nsISupports
+ ).wrappedJSObject;
+ if (!service.ready) {
+ return false;
+ }
+ // LOGIN_FAILED_LOGIN_REJECTED explicitly means "you must log back in".
+ // All other login failures are assumed to be transient and should go
+ // away by themselves, so aren't reflected here.
+ return lazy.Weave.Status.login == lazy.LOGIN_FAILED_LOGIN_REJECTED;
+ },
+
+ set fxAccounts(mockFxAccounts) {
+ delete this.fxAccounts;
+ this.fxAccounts = mockFxAccounts;
+ },
+};
+
+ChromeUtils.defineLazyGetter(UIStateInternal, "fxAccounts", () => {
+ return ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+ ).getFxAccountsSingleton();
+});
+
+for (let topic of TOPICS) {
+ Services.obs.addObserver(UIStateInternal, topic);
+}
+
+export var UIState = {
+ _internal: UIStateInternal,
+
+ ON_UPDATE,
+
+ STATUS_NOT_CONFIGURED,
+ STATUS_LOGIN_FAILED,
+ STATUS_NOT_VERIFIED,
+ STATUS_SIGNED_IN,
+
+ /**
+ * Returns true if the module has been initialized and the state set.
+ * If not, return false and trigger an init in the background.
+ */
+ isReady() {
+ return this._internal.isReady();
+ },
+
+ /**
+ * @returns {UIState} The current Sync/FxA UI State.
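+   * @example
+   * // Illustrative only; `updateToolbar` is a hypothetical consumer helper.
+   * const state = UIState.get();
+   * if (state.status == UIState.STATUS_SIGNED_IN) {
+   *   updateToolbar(state.displayName, state.avatarURL);
+   * }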
+ */
+ get() {
+ return this._internal.state;
+ },
+
+ /**
+   * Refresh the state. Intended for testing; don't call this directly, since
+   * UIState already listens to Sync/FxA notifications and refreshes the state
+   * as needed. ON_UPDATE will be fired once the state is refreshed.
+ *
+ * @returns {Promise<UIState>} Resolved once the state is refreshed.
+ */
+ refresh() {
+ return this._internal.refreshState();
+ },
+
+ /**
+ * Reset the state of the whole module. Used for testing.
+ */
+ reset() {
+ this._internal.reset();
+ },
+};
diff --git a/services/sync/modules/addonsreconciler.sys.mjs b/services/sync/modules/addonsreconciler.sys.mjs
new file mode 100644
index 0000000000..902f57348e
--- /dev/null
+++ b/services/sync/modules/addonsreconciler.sys.mjs
@@ -0,0 +1,584 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * This file contains middleware to reconcile state of AddonManager for
+ * purposes of tracking events for Sync. The content in this file exists
+ * because AddonManager does not have a getChangesSinceX() API and adding
+ * that functionality properly was deemed too time-consuming at the time
+ * add-on sync was originally written. If/when AddonManager adds this API,
+ * this file can go away and the add-ons engine can be rewritten to use it.
+ *
+ * It was decided to have this tracking functionality exist in a separate
+ * standalone file so it could be more easily understood, tested, and
+ * hopefully ported.
+ */
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import { AddonManager } from "resource://gre/modules/AddonManager.sys.mjs";
+
+const DEFAULT_STATE_FILE = "addonsreconciler";
+
+export var CHANGE_INSTALLED = 1;
+export var CHANGE_UNINSTALLED = 2;
+export var CHANGE_ENABLED = 3;
+export var CHANGE_DISABLED = 4;
+
+/**
+ * Maintains state of add-ons.
+ *
+ * State is maintained in 2 data structures, an object mapping add-on IDs
+ * to metadata and an array of changes over time. The object mapping can be
+ * thought of as a minimal copy of data from AddonManager which is needed for
+ * Sync. The array is effectively a log of changes over time.
+ *
+ * The data structures are persisted to disk by serializing to a JSON file in
+ * the current profile. The data structures are updated by 2 mechanisms. First,
+ * they can be refreshed from the global state of the AddonManager. This is a
+ * sure-fire way of ensuring the reconciler is up to date. Second, the
+ * reconciler adds itself as an AddonManager listener. When it receives change
+ * notifications, it updates its internal state incrementally.
+ *
+ * An instance of this is bound to an AddonsEngine instance. In reality, it
+ * likely exists as a singleton. To AddonsEngine, it functions as a store and
+ * an entity which emits events for tracking.
+ *
+ * The usage pattern for instances of this class is:
+ *
+ * let reconciler = new AddonsReconciler(...);
+ * await reconciler.ensureStateLoaded();
+ *
+ * // At this point, your instance should be ready to use.
+ *
+ * When you are finished with the instance, please call:
+ *
+ * reconciler.stopListening();
+ * await reconciler.saveState(...);
+ *
+ * This class uses the AddonManager AddonListener interface.
+ * When an add-on is installed, listeners are called in the following order:
+ * AL.onInstalling, AL.onInstalled
+ *
+ * For uninstalls, we see AL.onUninstalling then AL.onUninstalled.
+ *
+ * Enabling and disabling work by sending:
+ *
+ * AL.onEnabling, AL.onEnabled
+ * AL.onDisabling, AL.onDisabled
+ *
+ * Actions can be undone. All undoable actions notify the same
+ * AL.onOperationCancelled event. We treat this event like any other.
+ *
+ * When an add-on is uninstalled from about:addons, the user is offered an
+ * "Undo" option, which leads to the following sequence of events as
+ * observed by an AddonListener: the add-on is first disabled, then actually
+ * uninstalled. So, we will see AL.onDisabling and AL.onDisabled. The
+ * onUninstalling and onUninstalled events only come after the Add-on Manager
+ * is closed or another view is switched to. When Sync itself performs the
+ * uninstall, the uninstall events occur immediately. Either way, we still see
+ * the disabling events and heed them as normal, and the state ends up correct.
+ */
+export function AddonsReconciler(queueCaller) {
+ this._log = Log.repository.getLogger("Sync.AddonsReconciler");
+ this._log.manageLevelFromPref("services.sync.log.logger.addonsreconciler");
+ this.queueCaller = queueCaller;
+
+ Svc.Obs.add("xpcom-shutdown", this.stopListening, this);
+}
+
+AddonsReconciler.prototype = {
+ /** Flag indicating whether we are listening to AddonManager events. */
+ _listening: false,
+
+ /**
+ * Define this as false if the reconciler should not persist state
+ * to disk when handling events.
+ *
+ * This allows test code to avoid spinning to write during observer
+ * notifications and xpcom shutdown, which appears to cause hangs on WinXP
+ * (Bug 873861).
+ */
+ _shouldPersist: true,
+
+  /** Log.sys.mjs logger instance. */
+ _log: null,
+
+ /**
+ * Container for add-on metadata.
+ *
+ * Keys are add-on IDs. Values are objects which describe the state of the
+ * add-on. This is a minimal mirror of data that can be queried from
+ * AddonManager. In some cases, we retain data longer than AddonManager.
+ */
+ _addons: {},
+
+ /**
+ * List of add-on changes over time.
+ *
+ * Each element is an array of [time, change, id].
+ */
+ _changes: [],
+
+ /**
+ * Objects subscribed to changes made to this instance.
+ */
+ _listeners: [],
+
+ /**
+ * Accessor for add-ons in this object.
+ *
+ * Returns an object mapping add-on IDs to objects containing metadata.
+ */
+ get addons() {
+ return this._addons;
+ },
+
+ async ensureStateLoaded() {
+ if (!this._promiseStateLoaded) {
+ this._promiseStateLoaded = this.loadState();
+ }
+ return this._promiseStateLoaded;
+ },
+
+ /**
+ * Load reconciler state from a file.
+ *
+ * The path is relative to the weave directory in the profile. If no
+ * path is given, the default one is used.
+ *
+ * If the file does not exist or there was an error parsing the file, the
+ * state will be transparently defined as empty.
+ *
+ * @param file
+ * Path to load. ".json" is appended automatically. If not defined,
+ * a default path will be consulted.
+ */
+ async loadState(file = DEFAULT_STATE_FILE) {
+ let json = await Utils.jsonLoad(file, this);
+ this._addons = {};
+ this._changes = [];
+
+ if (!json) {
+ this._log.debug("No data seen in loaded file: " + file);
+ return false;
+ }
+
+ let version = json.version;
+ if (!version || version != 1) {
+      this._log.error(
+        "Could not load JSON file because version not supported: " + version
+      );
+ return false;
+ }
+
+ this._addons = json.addons;
+ for (let id in this._addons) {
+ let record = this._addons[id];
+ record.modified = new Date(record.modified);
+ }
+
+ for (let [time, change, id] of json.changes) {
+ this._changes.push([new Date(time), change, id]);
+ }
+
+ return true;
+ },
+
+ /**
+ * Saves the current state to a file in the local profile.
+ *
+ * @param file
+ * String path in profile to save to. If not defined, the default
+ * will be used.
+ */
+ async saveState(file = DEFAULT_STATE_FILE) {
+ let state = { version: 1, addons: {}, changes: [] };
+
+ for (let [id, record] of Object.entries(this._addons)) {
+ state.addons[id] = {};
+ for (let [k, v] of Object.entries(record)) {
+ if (k == "modified") {
+ state.addons[id][k] = v.getTime();
+ } else {
+ state.addons[id][k] = v;
+ }
+ }
+ }
+
+ for (let [time, change, id] of this._changes) {
+ state.changes.push([time.getTime(), change, id]);
+ }
+
+ this._log.info("Saving reconciler state to file: " + file);
+ await Utils.jsonSave(file, this, state);
+ },
+
+ /**
+ * Registers a change listener with this instance.
+ *
+ * Change listeners are called every time a change is recorded. The listener
+ * is an object with the function "changeListener" that takes 3 arguments,
+ * the Date at which the change happened, the type of change (a CHANGE_*
+ * constant), and the add-on state object reflecting the current state of
+ * the add-on at the time of the change.
+ *
+ * @param listener
+ * Object containing changeListener function.
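+   *
+   * @example
+   * // Sketch of a minimal listener (illustrative only):
+   * reconciler.addChangeListener({
+   *   async changeListener(date, change, addonState) {
+   *     console.log("add-on change", date, change, addonState.id);
+   *   },
+   * });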
+ */
+ addChangeListener: function addChangeListener(listener) {
+ if (!this._listeners.includes(listener)) {
+ this._log.debug("Adding change listener.");
+ this._listeners.push(listener);
+ }
+ },
+
+ /**
+ * Removes a previously-installed change listener from the instance.
+ *
+ * @param listener
+ * Listener instance to remove.
+ */
+ removeChangeListener: function removeChangeListener(listener) {
+ this._listeners = this._listeners.filter(element => {
+ if (element == listener) {
+ this._log.debug("Removing change listener.");
+ return false;
+ }
+ return true;
+ });
+ },
+
+ /**
+ * Tells the instance to start listening for AddonManager changes.
+ *
+ * This is typically called automatically when Sync is loaded.
+ */
+ startListening: function startListening() {
+ if (this._listening) {
+ return;
+ }
+
+ this._log.info("Registering as Add-on Manager listener.");
+ AddonManager.addAddonListener(this);
+ this._listening = true;
+ },
+
+ /**
+ * Tells the instance to stop listening for AddonManager changes.
+ *
+ * The reconciler should always be listening. This should only be called when
+ * the instance is being destroyed.
+ *
+ * This function will get called automatically on XPCOM shutdown. However, it
+ * is a best practice to call it yourself.
+ */
+ stopListening: function stopListening() {
+ if (!this._listening) {
+ return;
+ }
+
+ this._log.debug("Stopping listening and removing AddonManager listener.");
+ AddonManager.removeAddonListener(this);
+ this._listening = false;
+ },
+
+ /**
+ * Refreshes the global state of add-ons by querying the AddonManager.
+ */
+ async refreshGlobalState() {
+ this._log.info("Refreshing global state from AddonManager.");
+
+ let installs;
+ let addons = await AddonManager.getAllAddons();
+
+ let ids = {};
+
+ for (let addon of addons) {
+ ids[addon.id] = true;
+ await this.rectifyStateFromAddon(addon);
+ }
+
+ // Look for locally-defined add-ons that no longer exist and update their
+ // record.
+ for (let [id, addon] of Object.entries(this._addons)) {
+ if (id in ids) {
+ continue;
+ }
+
+ // If the id isn't in ids, it means that the add-on has been deleted or
+ // the add-on is in the process of being installed. We detect the
+ // latter by seeing if an AddonInstall is found for this add-on.
+
+ if (!installs) {
+ installs = await AddonManager.getAllInstalls();
+ }
+
+ let installFound = false;
+ for (let install of installs) {
+ if (
+ install.addon &&
+ install.addon.id == id &&
+ install.state == AddonManager.STATE_INSTALLED
+ ) {
+ installFound = true;
+ break;
+ }
+ }
+
+ if (installFound) {
+ continue;
+ }
+
+ if (addon.installed) {
+ addon.installed = false;
+ this._log.debug(
+ "Adding change because add-on not present in " +
+ "Add-on Manager: " +
+ id
+ );
+ await this._addChange(new Date(), CHANGE_UNINSTALLED, addon);
+ }
+ }
+
+ // See note for _shouldPersist.
+ if (this._shouldPersist) {
+ await this.saveState();
+ }
+ },
+
+ /**
+ * Rectifies the state of an add-on from an Addon instance.
+ *
+ * This basically says "given an Addon instance, assume it is truth and
+ * apply changes to the local state to reflect it."
+ *
+ * This function could result in change listeners being called if the local
+ * state differs from the passed add-on's state.
+ *
+ * @param addon
+ * Addon instance being updated.
+ */
+ async rectifyStateFromAddon(addon) {
+ this._log.debug(
+ `Rectifying state for addon ${addon.name} (version=${addon.version}, id=${addon.id})`
+ );
+
+ let id = addon.id;
+ let enabled = !addon.userDisabled;
+ let guid = addon.syncGUID;
+ let now = new Date();
+
+ if (!(id in this._addons)) {
+ let record = {
+ id,
+ guid,
+ enabled,
+ installed: true,
+ modified: now,
+ type: addon.type,
+ scope: addon.scope,
+ foreignInstall: addon.foreignInstall,
+ isSyncable: addon.isSyncable,
+ };
+ this._addons[id] = record;
+ this._log.debug(
+ "Adding change because add-on not present locally: " + id
+ );
+ await this._addChange(now, CHANGE_INSTALLED, record);
+ return;
+ }
+
+ let record = this._addons[id];
+ record.isSyncable = addon.isSyncable;
+
+ if (!record.installed) {
+ // It is possible the record is marked as uninstalled because an
+ // uninstall is pending.
+ if (!(addon.pendingOperations & AddonManager.PENDING_UNINSTALL)) {
+ record.installed = true;
+ record.modified = now;
+ }
+ }
+
+ if (record.enabled != enabled) {
+ record.enabled = enabled;
+ record.modified = now;
+ let change = enabled ? CHANGE_ENABLED : CHANGE_DISABLED;
+ this._log.debug("Adding change because enabled state changed: " + id);
+ await this._addChange(new Date(), change, record);
+ }
+
+ if (record.guid != guid) {
+ record.guid = guid;
+ // We don't record a change because the Sync engine rectifies this on its
+ // own. This is tightly coupled with Sync. If this code is ever lifted
+ // outside of Sync, this exception should likely be removed.
+ }
+ },
+
+ /**
+ * Record a change in add-on state.
+ *
+ * @param date
+ * Date at which the change occurred.
+ * @param change
+ * The type of the change. A CHANGE_* constant.
+ * @param state
+ * The new state of the add-on. From this.addons.
+ */
+ async _addChange(date, change, state) {
+ this._log.info("Change recorded for " + state.id);
+ this._changes.push([date, change, state.id]);
+
+ for (let listener of this._listeners) {
+ try {
+ await listener.changeListener(date, change, state);
+ } catch (ex) {
+ this._log.error("Exception calling change listener", ex);
+ }
+ }
+ },
+
+ /**
+ * Obtain the set of changes to add-ons since the date passed.
+ *
+ * This will return an array of arrays. Each entry in the array has the
+ * elements [date, change_type, id], where
+ *
+ * date - Date instance representing when the change occurred.
+ * change_type - One of CHANGE_* constants.
+ * id - ID of add-on that changed.
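+   *
+   * @example
+   * // Illustrative: changes recorded in the last hour.
+   * let oneHourAgo = new Date(Date.now() - 60 * 60 * 1000);
+   * for (let [date, change, id] of reconciler.getChangesSinceDate(oneHourAgo)) {
+   *   console.log(date, change, id);
+   * }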
+ */
+ getChangesSinceDate(date) {
+ let length = this._changes.length;
+ for (let i = 0; i < length; i++) {
+ if (this._changes[i][0] >= date) {
+ return this._changes.slice(i);
+ }
+ }
+
+ return [];
+ },
+
+ /**
+ * Prunes all recorded changes from before the specified Date.
+ *
+ * @param date
+ * Entries older than this Date will be removed.
+ */
+ pruneChangesBeforeDate(date) {
+ this._changes = this._changes.filter(function test_age(change) {
+ return change[0] >= date;
+ });
+ },
+
+ /**
+ * Obtains the set of all known Sync GUIDs for add-ons.
+ */
+ getAllSyncGUIDs() {
+ let result = {};
+    for (let id in this.addons) {
+      // Key by the Sync GUID (not the add-on ID), matching the function name
+      // and documentation.
+      result[this.addons[id].guid] = true;
+    }
+
+ return result;
+ },
+
+ /**
+ * Obtain the add-on state record for an add-on by Sync GUID.
+ *
+ * If the add-on could not be found, returns null.
+ *
+ * @param guid
+ * Sync GUID of add-on to retrieve.
+ */
+ getAddonStateFromSyncGUID(guid) {
+ for (let id in this.addons) {
+ let addon = this.addons[id];
+ if (addon.guid == guid) {
+ return addon;
+ }
+ }
+
+ return null;
+ },
+
+ /**
+ * Handler that is invoked as part of the AddonManager listeners.
+ */
+ async _handleListener(action, addon) {
+ // Since this is called as an observer, we explicitly trap errors and
+ // log them to ourselves so we don't see errors reported elsewhere.
+ try {
+ let id = addon.id;
+ this._log.debug("Add-on change: " + action + " to " + id);
+
+ switch (action) {
+ case "onEnabled":
+ case "onDisabled":
+ case "onInstalled":
+ case "onInstallEnded":
+ case "onOperationCancelled":
+ await this.rectifyStateFromAddon(addon);
+ break;
+
+ case "onUninstalled":
+ let id = addon.id;
+ let addons = this.addons;
+ if (id in addons) {
+ let now = new Date();
+ let record = addons[id];
+ record.installed = false;
+ record.modified = now;
+ this._log.debug(
+ "Adding change because of uninstall listener: " + id
+ );
+ await this._addChange(now, CHANGE_UNINSTALLED, record);
+ }
+ }
+
+ // See note for _shouldPersist.
+ if (this._shouldPersist) {
+ await this.saveState();
+ }
+ } catch (ex) {
+ this._log.warn("Exception", ex);
+ }
+ },
+
+ // AddonListeners
+ onEnabled: function onEnabled(addon) {
+ this.queueCaller.enqueueCall(() =>
+ this._handleListener("onEnabled", addon)
+ );
+ },
+ onDisabled: function onDisabled(addon) {
+ this.queueCaller.enqueueCall(() =>
+ this._handleListener("onDisabled", addon)
+ );
+ },
+ onInstalled: function onInstalled(addon) {
+ this.queueCaller.enqueueCall(() =>
+ this._handleListener("onInstalled", addon)
+ );
+ },
+ onUninstalled: function onUninstalled(addon) {
+ this.queueCaller.enqueueCall(() =>
+ this._handleListener("onUninstalled", addon)
+ );
+ },
+ onOperationCancelled: function onOperationCancelled(addon) {
+ this.queueCaller.enqueueCall(() =>
+ this._handleListener("onOperationCancelled", addon)
+ );
+ },
+};
diff --git a/services/sync/modules/addonutils.sys.mjs b/services/sync/modules/addonutils.sys.mjs
new file mode 100644
index 0000000000..08a8c5b5f0
--- /dev/null
+++ b/services/sync/modules/addonutils.sys.mjs
@@ -0,0 +1,391 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Svc } from "resource://services-sync/util.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ AddonManager: "resource://gre/modules/AddonManager.sys.mjs",
+ AddonRepository: "resource://gre/modules/addons/AddonRepository.sys.mjs",
+});
+
+function AddonUtilsInternal() {
+ this._log = Log.repository.getLogger("Sync.AddonUtils");
+  this._log.level =
+    Log.Level[Svc.PrefBranch.getStringPref("log.logger.addonutils", null)];
+}
+AddonUtilsInternal.prototype = {
+ /**
+ * Obtain an AddonInstall object from an AddonSearchResult instance.
+ *
+   * The returned promise resolves to an AddonInstall on success, or to null
+   * on failure or if the add-on could not be found.
+ *
+ * @param addon
+ * AddonSearchResult to obtain install from.
+ */
+ getInstallFromSearchResult(addon) {
+ this._log.debug("Obtaining install for " + addon.id);
+
+ // We should theoretically be able to obtain (and use) addon.install if
+ // it is available. However, the addon.sourceURI rewriting won't be
+ // reflected in the AddonInstall, so we can't use it. If we ever get rid
+ // of sourceURI rewriting, we can avoid having to reconstruct the
+ // AddonInstall.
+ return lazy.AddonManager.getInstallForURL(addon.sourceURI.spec, {
+ name: addon.name,
+ icons: addon.iconURL,
+ version: addon.version,
+ telemetryInfo: { source: "sync" },
+ });
+ },
+
+ /**
+ * Installs an add-on from an AddonSearchResult instance.
+ *
+ * The options argument defines extra options to control the install.
+ * Recognized keys in this map are:
+ *
+ * syncGUID - Sync GUID to use for the new add-on.
+ * enabled - Boolean indicating whether the add-on should be enabled upon
+ * install.
+ *
+ * The result object has the following keys:
+ *
+ * id ID of add-on that was installed.
+ * install AddonInstall that was installed.
+ * addon Addon that was installed.
+ *
+ * @param addon
+ * AddonSearchResult to install add-on from.
+ * @param options
+ * Object with additional metadata describing how to install add-on.
+ */
+ async installAddonFromSearchResult(addon, options) {
+ this._log.info("Trying to install add-on from search result: " + addon.id);
+
+ const install = await this.getInstallFromSearchResult(addon);
+ if (!install) {
+ throw new Error("AddonInstall not available: " + addon.id);
+ }
+
+ try {
+ this._log.info("Installing " + addon.id);
+ let log = this._log;
+
+ return new Promise((res, rej) => {
+ let listener = {
+ onInstallStarted: function onInstallStarted(install) {
+ if (!options) {
+ return;
+ }
+
+ if (options.syncGUID) {
+ log.info(
+ "Setting syncGUID of " + install.name + ": " + options.syncGUID
+ );
+ install.addon.syncGUID = options.syncGUID;
+ }
+
+ // We only need to change userDisabled if it is disabled because
+ // enabled is the default.
+ if ("enabled" in options && !options.enabled) {
+ log.info(
+ "Marking add-on as disabled for install: " + install.name
+ );
+ install.addon.disable();
+ }
+ },
+ onInstallEnded(install, addon) {
+ install.removeListener(listener);
+
+ res({ id: addon.id, install, addon });
+ },
+ onInstallFailed(install) {
+ install.removeListener(listener);
+
+ rej(new Error("Install failed: " + install.error));
+ },
+ onDownloadFailed(install) {
+ install.removeListener(listener);
+
+ rej(new Error("Download failed: " + install.error));
+ },
+ };
+ install.addListener(listener);
+ install.install();
+ });
+ } catch (ex) {
+ this._log.error("Error installing add-on", ex);
+ throw ex;
+ }
+ },
+
+ /**
+ * Uninstalls the addon instance.
+ *
+ * @param addon
+ * Addon instance to uninstall.
+ */
+ async uninstallAddon(addon) {
+ return new Promise(res => {
+ let listener = {
+ onUninstalling(uninstalling, needsRestart) {
+ if (addon.id != uninstalling.id) {
+ return;
+ }
+
+ // We assume restartless add-ons will send the onUninstalled event
+ // soon.
+ if (!needsRestart) {
+ return;
+ }
+
+ // For non-restartless add-ons, we issue the callback on uninstalling
+ // because we will likely never see the uninstalled event.
+ lazy.AddonManager.removeAddonListener(listener);
+ res(addon);
+ },
+ onUninstalled(uninstalled) {
+ if (addon.id != uninstalled.id) {
+ return;
+ }
+
+ lazy.AddonManager.removeAddonListener(listener);
+ res(addon);
+ },
+ };
+ lazy.AddonManager.addAddonListener(listener);
+ addon.uninstall();
+ });
+ },
+
+ /**
+ * Installs multiple add-ons specified by metadata.
+ *
+ * The first argument is an array of objects. Each object must have the
+ * following keys:
+ *
+ * id - public ID of the add-on to install.
+ * syncGUID - syncGUID for new add-on.
+ * enabled - boolean indicating whether the add-on should be enabled.
+ * requireSecureURI - Boolean indicating whether to require a secure
+ * URI when installing from a remote location. This defaults to
+ * true.
+ *
+   * The returned promise resolves, once activity on all add-ons is complete,
+   * with an object describing the overall execution state. It contains the
+   * following keys:
+   *
+   *   installedIDs Array of add-on IDs that were installed.
+   *   installs     Array of AddonInstall instances that were installed.
+   *   addons       Array of Addon instances that were installed.
+   *   skipped      Array of add-on IDs that were skipped.
+   *
+   * If one or more add-ons fail to install, the promise is rejected instead.
+ *
+ * @param installs
+ * Array of objects describing add-ons to install.
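+   *
+   * @example
+   * // Hedged sketch of a call; the ID and GUID below are illustrative.
+   * let result = await AddonUtils.installAddons([
+   *   { id: "addon@example.com", syncGUID: "abcdef123456", enabled: true },
+   * ]);
+   * // Inspect result.installedIDs, result.installs, result.addons,
+   * // and result.skipped.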
+ */
+ async installAddons(installs) {
+ let ids = [];
+ for (let addon of installs) {
+ ids.push(addon.id);
+ }
+
+ let addons = await lazy.AddonRepository.getAddonsByIDs(ids);
+ this._log.info(
+ `Found ${addons.length} / ${ids.length}` +
+ " add-ons during repository search."
+ );
+
+ let ourResult = {
+ installedIDs: [],
+ installs: [],
+ addons: [],
+ skipped: [],
+ errors: [],
+ };
+
+ let toInstall = [];
+
+ // Rewrite the "src" query string parameter of the source URI to note
+ // that the add-on was installed by Sync and not something else so
+ // server-side metrics aren't skewed (bug 708134). The server should
+ // ideally send proper URLs, but this solution was deemed too
+ // complicated at the time the functionality was implemented.
+ for (let addon of addons) {
+ // Find the specified options for this addon.
+ let options;
+ for (let install of installs) {
+ if (install.id == addon.id) {
+ options = install;
+ break;
+ }
+ }
+ if (!this.canInstallAddon(addon, options)) {
+ ourResult.skipped.push(addon.id);
+ continue;
+ }
+
+ // We can go ahead and attempt to install it.
+ toInstall.push(addon);
+
+ // We should always be able to QI the nsIURI to nsIURL. If not, we
+ // still try to install the add-on, but we don't rewrite the URL,
+ // potentially skewing metrics.
+ try {
+ addon.sourceURI.QueryInterface(Ci.nsIURL);
+ } catch (ex) {
+ this._log.warn(
+ "Unable to QI sourceURI to nsIURL: " + addon.sourceURI.spec
+ );
+ continue;
+ }
+
+ let params = addon.sourceURI.query
+ .split("&")
+ .map(function rewrite(param) {
+ if (param.indexOf("src=") == 0) {
+ return "src=sync";
+ }
+ return param;
+ });
+
+ addon.sourceURI = addon.sourceURI
+ .mutate()
+ .setQuery(params.join("&"))
+ .finalize();
+ }
+
+ if (!toInstall.length) {
+ return ourResult;
+ }
+
+ const installPromises = [];
+ // Start all the installs asynchronously. They will report back to us
+ // as they finish, eventually triggering the global callback.
+ for (let addon of toInstall) {
+ let options = {};
+ for (let install of installs) {
+ if (install.id == addon.id) {
+ options = install;
+ break;
+ }
+ }
+
+ installPromises.push(
+ (async () => {
+ try {
+ const result = await this.installAddonFromSearchResult(
+ addon,
+ options
+ );
+ ourResult.installedIDs.push(result.id);
+ ourResult.installs.push(result.install);
+ ourResult.addons.push(result.addon);
+ } catch (error) {
+ ourResult.errors.push(error);
+ }
+ })()
+ );
+ }
+
+ await Promise.all(installPromises);
+
+ if (ourResult.errors.length) {
+ throw new Error("1 or more add-ons failed to install");
+ }
+ return ourResult;
+ },
+
+ /**
+ * Returns true if we are able to install the specified addon, false
+ * otherwise. It is expected that this will log the reason if it returns
+ * false.
+ *
+ * @param addon
+ * (Addon) Add-on instance to check.
+ * @param options
+ * (object) The options specified for this addon. See installAddons()
+ * for the valid elements.
+ */
+ canInstallAddon(addon, options) {
+ // sourceURI presence isn't enforced by AddonRepository. So, we skip
+ // add-ons without a sourceURI.
+ if (!addon.sourceURI) {
+ this._log.info(
+ "Skipping install of add-on because missing sourceURI: " + addon.id
+ );
+ return false;
+ }
+ // Verify that the source URI uses TLS. We don't allow installs from
+ // insecure sources for security reasons. The Addon Manager ensures
+ // that cert validation etc is performed.
+ // (We should also consider just dropping this entirely and calling
+ // XPIProvider.isInstallAllowed, but that has additional semantics we might
+ // need to think through...)
+ let requireSecureURI = true;
+ if (options && options.requireSecureURI !== undefined) {
+ requireSecureURI = options.requireSecureURI;
+ }
+
+ if (requireSecureURI) {
+ let scheme = addon.sourceURI.scheme;
+ if (scheme != "https") {
+ this._log.info(
+ `Skipping install of add-on "${addon.id}" because sourceURI's scheme of "${scheme}" is not trusted`
+ );
+ return false;
+ }
+ }
+
+    // Enterprise policy may prevent installing this add-on specifically, or
+    // any add-on at all.
+ if (
+ Services.policies &&
+ (!Services.policies.mayInstallAddon(addon) ||
+ !Services.policies.isAllowed("xpinstall"))
+ ) {
+ this._log.info(
+ `Skipping install of "${addon.id}" due to enterprise policy`
+ );
+ return false;
+ }
+
+ this._log.info(`Add-on "${addon.id}" is able to be installed`);
+ return true;
+ },
+
+ /**
+ * Update the user disabled flag for an add-on.
+ *
+   * If the new flag matches the existing value, the function returns
+   * immediately.
+ *
+ * @param addon
+ * (Addon) Add-on instance to operate on.
+ * @param value
+ * (bool) New value for add-on's userDisabled property.
+ */
+ updateUserDisabled(addon, value) {
+ if (addon.userDisabled == value) {
+ return;
+ }
+
+ this._log.info("Updating userDisabled flag: " + addon.id + " -> " + value);
+ if (value) {
+ addon.disable();
+ } else {
+ addon.enable();
+ }
+ },
+};
+
+export const AddonUtils = new AddonUtilsInternal();
diff --git a/services/sync/modules/bridged_engine.sys.mjs b/services/sync/modules/bridged_engine.sys.mjs
new file mode 100644
index 0000000000..45e5f685cd
--- /dev/null
+++ b/services/sync/modules/bridged_engine.sys.mjs
@@ -0,0 +1,499 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * This file has all the machinery for hooking up bridged engines implemented
+ * in Rust. It's the JavaScript side of the Golden Gate bridge that connects
+ * Desktop Sync to a Rust `BridgedEngine`, via the `mozIBridgedSyncEngine`
+ * XPCOM interface.
+ *
+ * Creating a bridged engine only takes a few lines of code, since most of the
+ * hard work is done on the Rust side. On the JS side, you'll need to subclass
+ * `BridgedEngine` (instead of `SyncEngine`), supply a `mozIBridgedSyncEngine`
+ * for your subclass to wrap, and optionally implement and override the tracker.
+ */
+
+import { SyncEngine, Tracker } from "resource://services-sync/engines.sys.mjs";
+import { RawCryptoWrapper } from "resource://services-sync/record.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ Log: "resource://gre/modules/Log.sys.mjs",
+ PlacesUtils: "resource://gre/modules/PlacesUtils.sys.mjs",
+});
+
+/**
+ * A stub store that converts between raw decrypted incoming records and
+ * envelopes. Since the interface we need is so minimal, this class doesn't
+ * inherit from the base `Store` implementation...it would take more code to
+ * override all those behaviors!
+ *
+ * This class isn't meant to be subclassed, because bridged engines shouldn't
+ * override their store classes in `_storeObj`.
+ */
+class BridgedStore {
+ constructor(name, engine) {
+ if (!engine) {
+ throw new Error("Store must be associated with an Engine instance.");
+ }
+ this.engine = engine;
+ this._log = lazy.Log.repository.getLogger(`Sync.Engine.${name}.Store`);
+ this._batchChunkSize = 500;
+ }
+
+ async applyIncomingBatch(records, countTelemetry) {
+ for (let chunk of lazy.PlacesUtils.chunkArray(
+ records,
+ this._batchChunkSize
+ )) {
+ let incomingEnvelopesAsJSON = chunk.map(record =>
+ JSON.stringify(record.toIncomingBso())
+ );
+ this._log.trace("incoming envelopes", incomingEnvelopesAsJSON);
+ await this.engine._bridge.storeIncoming(incomingEnvelopesAsJSON);
+ }
+ // Array of failed records.
+ return [];
+ }
+
+ async wipe() {
+ await this.engine._bridge.wipe();
+ }
+}
+
+/**
+ * A wrapper class to convert between BSOs on the JS side, and envelopes on the
+ * Rust side. This class intentionally subclasses `RawCryptoWrapper`, because we
+ * don't want the stringification and parsing machinery in `CryptoWrapper`.
+ *
+ * This class isn't meant to be subclassed, because bridged engines shouldn't
+ * override their record classes in `_recordObj`.
+ */
+class BridgedRecord extends RawCryptoWrapper {
+ /**
+ * Creates an outgoing record from a BSO returned by a bridged engine.
+ *
+ * @param {String} collection The collection name.
+   * @param {Object} bso The outgoing BSO (i.e., a sync15::bso::OutgoingBso)
+   *        returned from `mozIBridgedSyncEngine::apply`.
+ * @return {BridgedRecord} A Sync record ready to encrypt and upload.
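+   *
+   * @example
+   * // Illustrative shape of a JSON-parsed outgoing BSO:
+   * let record = BridgedRecord.fromOutgoingBso("example-collection", {
+   *   id: "record-guid",
+   *   payload: JSON.stringify({ field: "value" }),
+   * });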
+ */
+ static fromOutgoingBso(collection, bso) {
+ // The BSO has already been JSON serialized coming out of Rust, so the
+ // envelope has been flattened.
+ if (typeof bso.id != "string") {
+ throw new TypeError("Outgoing BSO missing ID");
+ }
+ if (typeof bso.payload != "string") {
+ throw new TypeError("Outgoing BSO missing payload");
+ }
+ let record = new BridgedRecord(collection, bso.id);
+ record.cleartext = bso.payload;
+ return record;
+ }
+
+ transformBeforeEncrypt(cleartext) {
+ if (typeof cleartext != "string") {
+ throw new TypeError("Outgoing bridged engine records must be strings");
+ }
+ return cleartext;
+ }
+
+ transformAfterDecrypt(cleartext) {
+ if (typeof cleartext != "string") {
+ throw new TypeError("Incoming bridged engine records must be strings");
+ }
+ return cleartext;
+ }
+
+  /**
+ * Converts this incoming record into an envelope to pass to a bridged engine.
+ * This object must be kept in sync with `sync15::IncomingBso`.
+ *
+ * @return {Object} The incoming envelope, to pass to
+ * `mozIBridgedSyncEngine::storeIncoming`.
+ */
+ toIncomingBso() {
+ return {
+ id: this.data.id,
+ modified: this.data.modified,
+ payload: this.cleartext,
+ };
+ }
+}
+
+class BridgeError extends Error {
+ constructor(code, message) {
+ super(message);
+ this.name = "BridgeError";
+ // TODO: We may want to use a different name for this, since errors with
+ // a `result` property are treated specially by telemetry, discarding the
+ // message...but, unlike other `nserror`s, the message is actually useful,
+ // and we still want to capture it.
+ this.result = code;
+ }
+}
+
+class InterruptedError extends Error {
+ constructor(message) {
+ super(message);
+ this.name = "InterruptedError";
+ }
+}
+
+/**
+ * Adapts a `Log.sys.mjs` logger to a `mozIServicesLogSink`. This class is copied
+ * from `SyncedBookmarksMirror.jsm`.
+ */
+export class LogAdapter {
+ constructor(log) {
+ this.log = log;
+ }
+
+ get maxLevel() {
+ let level = this.log.level;
+ if (level <= lazy.Log.Level.All) {
+ return Ci.mozIServicesLogSink.LEVEL_TRACE;
+ }
+ if (level <= lazy.Log.Level.Info) {
+ return Ci.mozIServicesLogSink.LEVEL_DEBUG;
+ }
+ if (level <= lazy.Log.Level.Warn) {
+ return Ci.mozIServicesLogSink.LEVEL_WARN;
+ }
+ if (level <= lazy.Log.Level.Error) {
+ return Ci.mozIServicesLogSink.LEVEL_ERROR;
+ }
+ return Ci.mozIServicesLogSink.LEVEL_OFF;
+ }
+
+ trace(message) {
+ this.log.trace(message);
+ }
+
+ debug(message) {
+ this.log.debug(message);
+ }
+
+ warn(message) {
+ this.log.warn(message);
+ }
+
+ error(message) {
+ this.log.error(message);
+ }
+}
+
+// This converts the XPCOM-defined, callback-based mozIBridgedSyncEngine to
+// a promise-based implementation.
+export class BridgeWrapperXPCOM {
+ constructor(component) {
+ this.comp = component;
+ }
+
+ // A few sync, non-callback based attributes.
+ get storageVersion() {
+ return this.comp.storageVersion;
+ }
+
+ get allowSkippedRecord() {
+ return this.comp.allowSkippedRecord;
+ }
+
+ get logger() {
+ return this.comp.logger;
+ }
+
+ // And the async functions we promisify.
+  // Note this is `lastSync` via UniFFI but `getLastSync` via XPCOM.
+ lastSync() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.getLastSync);
+ }
+
+ setLastSync(lastSyncMillis) {
+ return BridgeWrapperXPCOM.#promisify(this.comp.setLastSync, lastSyncMillis);
+ }
+
+ getSyncId() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.getSyncId);
+ }
+
+ resetSyncId() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.resetSyncId);
+ }
+
+ ensureCurrentSyncId(newSyncId) {
+ return BridgeWrapperXPCOM.#promisify(
+ this.comp.ensureCurrentSyncId,
+ newSyncId
+ );
+ }
+
+ syncStarted() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.syncStarted);
+ }
+
+ storeIncoming(incomingEnvelopesAsJSON) {
+ return BridgeWrapperXPCOM.#promisify(
+ this.comp.storeIncoming,
+ incomingEnvelopesAsJSON
+ );
+ }
+
+ apply() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.apply);
+ }
+
+ setUploaded(newTimestampMillis, uploadedIds) {
+ return BridgeWrapperXPCOM.#promisify(
+ this.comp.setUploaded,
+ newTimestampMillis,
+ uploadedIds
+ );
+ }
+
+ syncFinished() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.syncFinished);
+ }
+
+ reset() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.reset);
+ }
+
+ wipe() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.wipe);
+ }
+
+ // Converts a XPCOM bridged function that takes a callback into one that returns a
+ // promise.
+ static #promisify(func, ...params) {
+ return new Promise((resolve, reject) => {
+ func(...params, {
+ // This object implicitly implements all three callback interfaces
+ // (`mozIBridgedSyncEngine{Apply, Result}Callback`), because they have
+ // the same methods. The only difference is the type of the argument
+ // passed to `handleSuccess`, which doesn't matter in JS.
+ handleSuccess: resolve,
+ handleError(code, message) {
+ reject(transformError(code, message));
+ },
+ });
+ });
+ }
+}
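+
+// Illustrative usage sketch; the contract ID below is hypothetical:
+//
+//   let bridge = new BridgeWrapperXPCOM(
+//     Cc["@example.com/bridged-engine;1"].getService(Ci.mozIBridgedSyncEngine)
+//   );
+//   let lastSyncMillis = await bridge.lastSync();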
+
+/**
+ * A base class used to plug a Rust engine into Sync, and have it work like any
+ * other engine. The constructor takes a bridge as its first argument, which is
+ * a "bridged sync engine", as defined by UniFFI in the application-services
+ * crate.
+ * For backwards compatibility, this can also be an instance of an XPCOM
+ * component class that implements `mozIBridgedSyncEngine`, wrapped in
+ * a `BridgeWrapperXPCOM` wrapper.
+ * (Note that at time of writing, the above is slightly aspirational; the
+ * actual definition of the UniFFI shared bridged engine is still in flux.)
+ *
+ * This class inherits from `SyncEngine`, which has a lot of machinery that we
+ * don't need, but that's fairly easy to override. It would be harder to
+ * reimplement the machinery that we _do_ need here. However, because of that,
+ * this class has lots of methods that do nothing, or return empty data. The
+ * docs above each method explain what it's overriding, and why.
+ *
+ * This class is designed to be subclassed, but the only part that your engine
+ * may want to override is `_trackerObj`. Even then, using the default (no-op)
+ * tracker is fine, because the shape of the `Tracker` interface may not make
+ * sense for all engines.
+ */
+export function BridgedEngine(name, service) {
+ SyncEngine.call(this, name, service);
+}
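+
+// A hedged sketch of a subclass (names illustrative; a real engine supplies an
+// actual UniFFI bridge object as `this._bridge`):
+//
+//   export function ExampleBridgedEngine(service) {
+//     BridgedEngine.call(this, "Example", service);
+//     this._bridge = makeExampleRustBridge(); // hypothetical factory
+//   }
+//   Object.setPrototypeOf(ExampleBridgedEngine.prototype, BridgedEngine.prototype);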
+
+BridgedEngine.prototype = {
+ /**
+ * The Rust implemented bridge. Must be set by the engine which subclasses us.
+ */
+ _bridge: null,
+ /**
+ * The tracker class for this engine. Subclasses may want to override this
+ * with their own tracker, though using the default `Tracker` is fine.
+ */
+ _trackerObj: Tracker,
+
+ /** Returns the record class for all bridged engines. */
+ get _recordObj() {
+ return BridgedRecord;
+ },
+
+ set _recordObj(obj) {
+ throw new TypeError("Don't override the record class for bridged engines");
+ },
+
+ /** Returns the store class for all bridged engines. */
+ get _storeObj() {
+ return BridgedStore;
+ },
+
+ set _storeObj(obj) {
+ throw new TypeError("Don't override the store class for bridged engines");
+ },
+
+ /** Returns the storage version for this engine. */
+ get version() {
+ return this._bridge.storageVersion;
+ },
+
+ // Legacy engines allow sync to proceed if some records are too large to
+ // upload (eg, a payload that's bigger than the server's published limits).
+ // If this returns true, we will just skip the record without even attempting
+ // to upload. If this is false, we'll abort the entire batch.
+ // If the engine allows this, it will need to detect this scenario by noticing
+ // the ID is not in the 'success' records reported to `setUploaded`.
+  // (Note that this is not to be confused with the fact that servers can
+  // currently reject records as part of a POST - but we hope to remove this
+  // ability from the server API. Note also that this is not bullet-proof - if
+  // the count of records is high, it's possible that we will have committed a
+  // previous batch before we hit the relevant limits, so things might have
+  // been written. We hope to fix this by ensuring batch limits are such that
+  // this is impossible.)
+ get allowSkippedRecord() {
+ return this._bridge.allowSkippedRecord;
+ },
+
+ /**
+ * Returns the sync ID for this engine. This is exposed for tests, but
+ * Sync code always calls `resetSyncID()` and `ensureCurrentSyncID()`,
+ * not this.
+ *
+ * @returns {String?} The sync ID, or `null` if one isn't set.
+ */
+ async getSyncID() {
+ // Note that all methods on an XPCOM class instance are automatically bound,
+ // so we don't need to write `this._bridge.getSyncId.bind(this._bridge)`.
+ let syncID = await this._bridge.getSyncId();
+ return syncID;
+ },
+
+ async resetSyncID() {
+ await this._deleteServerCollection();
+ let newSyncID = await this.resetLocalSyncID();
+ return newSyncID;
+ },
+
+ async resetLocalSyncID() {
+ let newSyncID = await this._bridge.resetSyncId();
+ return newSyncID;
+ },
+
+ async ensureCurrentSyncID(newSyncID) {
+ let assignedSyncID = await this._bridge.ensureCurrentSyncId(newSyncID);
+ return assignedSyncID;
+ },
+
+ async getLastSync() {
+    // The bridge defines lastSync as integer milliseconds, but Sync itself
+    // works in floating-point seconds with 2 decimal places.
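+    // For example, 1234567890123 ms becomes 1234567890.12 s.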
+ let lastSyncMS = await this._bridge.lastSync();
+ return Math.round(lastSyncMS / 10) / 100;
+ },
+
+ async setLastSync(lastSyncSeconds) {
+ await this._bridge.setLastSync(Math.round(lastSyncSeconds * 1000));
+ },
+
+ /**
+ * Returns the initial changeset for the sync. Bridged engines handle
+ * reconciliation internally, so we don't know what changed until after we've
+ * stored and applied all incoming records. So we return an empty changeset
+ * here, and replace it with the real one in `_processIncoming`.
+ */
+ async pullChanges() {
+ return {};
+ },
+
+ async trackRemainingChanges() {
+ await this._bridge.syncFinished();
+ },
+
+ /**
+ * Marks a record for a hard-`DELETE` at the end of the sync. The base method
+ * also removes it from the tracker, but we don't use the tracker for that,
+ * so we override the method to just mark.
+ */
+ _deleteId(id) {
+ this._noteDeletedId(id);
+ },
+
+ /**
+ * Always stage incoming records, bypassing the base engine's reconciliation
+ * machinery.
+ */
+ async _reconcile() {
+ return true;
+ },
+
+ async _syncStartup() {
+ await super._syncStartup();
+ await this._bridge.syncStarted();
+ },
+
+ async _processIncoming(newitems) {
+ await super._processIncoming(newitems);
+
+ let outgoingBsosAsJSON = await this._bridge.apply();
+ let changeset = {};
+ for (let bsoAsJSON of outgoingBsosAsJSON) {
+ this._log.trace("outgoing bso", bsoAsJSON);
+ let record = BridgedRecord.fromOutgoingBso(
+ this.name,
+ JSON.parse(bsoAsJSON)
+ );
+ changeset[record.id] = {
+ synced: false,
+ record,
+ };
+ }
+ this._modified.replace(changeset);
+ },
+
+ /**
+ * Notify the bridged engine that we've successfully uploaded a batch, so
+ * that it can update its local state. For example, if the engine uses a
+ * mirror and a temp table for outgoing records, it can write the uploaded
+ * records from the outgoing table back to the mirror.
+ */
+ async _onRecordsWritten(succeeded, failed, serverModifiedTime) {
+    // JS uses seconds but Rust uses milliseconds, so convert. (serverModifiedMS
+    // is already an integer after Math.round; no further flooring is needed.)
+    let serverModifiedMS = Math.round(serverModifiedTime * 1000);
+    await this._bridge.setUploaded(serverModifiedMS, succeeded);
+ },
+
+ async _createTombstone() {
+ throw new Error("Bridged engines don't support weak uploads");
+ },
+
+ async _createRecord(id) {
+ let change = this._modified.changes[id];
+ if (!change) {
+ throw new TypeError("Can't create record for unchanged item");
+ }
+ return change.record;
+ },
+
+ async _resetClient() {
+ await super._resetClient();
+ await this._bridge.reset();
+ },
+};
+Object.setPrototypeOf(BridgedEngine.prototype, SyncEngine.prototype);
+
+function transformError(code, message) {
+ switch (code) {
+ case Cr.NS_ERROR_ABORT:
+ return new InterruptedError(message);
+
+ default:
+ return new BridgeError(code, message);
+ }
+}
diff --git a/services/sync/modules/collection_validator.sys.mjs b/services/sync/modules/collection_validator.sys.mjs
new file mode 100644
index 0000000000..a64ede10e9
--- /dev/null
+++ b/services/sync/modules/collection_validator.sys.mjs
@@ -0,0 +1,267 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ Async: "resource://services-common/async.sys.mjs",
+});
+
+export class CollectionProblemData {
+ constructor() {
+ this.missingIDs = 0;
+ this.clientDuplicates = [];
+ this.duplicates = [];
+ this.clientMissing = [];
+ this.serverMissing = [];
+ this.serverDeleted = [];
+ this.serverUnexpected = [];
+ this.differences = [];
+ }
+
+ /**
+ * Produce a list summarizing problems found. Each entry contains {name, count},
+ * where name is the field name for the problem, and count is the number of times
+ * the problem was encountered.
+ *
+   * Validation has failed if any count is not 0.
+ */
+ getSummary() {
+ return [
+ { name: "clientMissing", count: this.clientMissing.length },
+ { name: "serverMissing", count: this.serverMissing.length },
+ { name: "serverDeleted", count: this.serverDeleted.length },
+ { name: "serverUnexpected", count: this.serverUnexpected.length },
+ { name: "differences", count: this.differences.length },
+ { name: "missingIDs", count: this.missingIDs },
+ { name: "clientDuplicates", count: this.clientDuplicates.length },
+ { name: "duplicates", count: this.duplicates.length },
+ ];
+ }
+}
+
+export class CollectionValidator {
+ // Construct a generic collection validator. This is intended to be called by
+ // subclasses.
+ // - name: Name of the engine
+ // - idProp: Property that identifies a record. That is, if a client and server
+ // record have the same value for the idProp property, they should be
+  //           compared against each other.
+ // - props: Array of properties that should be compared
+ constructor(name, idProp, props) {
+ this.name = name;
+ this.props = props;
+ this.idProp = idProp;
+
+ // This property deals with the fact that form history records are never
+ // deleted from the server. The FormValidator subclass needs to ignore the
+    // "client missing" records, and it uses this property to achieve that
+    // (bug 1354016).
+ this.ignoresMissingClients = false;
+ }
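+
+  // A hypothetical subclass sketch (engine and property names illustrative):
+  //
+  //   class ExampleValidator extends CollectionValidator {
+  //     constructor() {
+  //       super("example", "guid", ["title", "url"]);
+  //     }
+  //     getClientItems() {
+  //       return fetchLocalExampleRecords(); // hypothetical helper
+  //     }
+  //   }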
+
+ // Should a custom ProblemData type be needed, return it here.
+ emptyProblemData() {
+ return new CollectionProblemData();
+ }
+
+ async getServerItems(engine) {
+ let collection = engine.itemSource();
+ let collectionKey = engine.service.collectionKeys.keyForCollection(
+ engine.name
+ );
+ collection.full = true;
+ let result = await collection.getBatched();
+ if (!result.response.success) {
+ throw result.response;
+ }
+ let cleartexts = [];
+
+ await lazy.Async.yieldingForEach(result.records, async record => {
+ await record.decrypt(collectionKey);
+ cleartexts.push(record.cleartext);
+ });
+
+ return cleartexts;
+ }
+
+ // Should return a promise that resolves to an array of client items.
+ getClientItems() {
+ return Promise.reject("Must implement");
+ }
+
+ /**
+   * Whether we can expect validation to produce a meaningful result. If
+   * validation could fail for a reason that isn't actually a problem (for
+   * example, pending changes left over from the last sync), this should
+   * resolve to false. By default resolves to true.
+ */
+ async canValidate() {
+ return true;
+ }
+
+ // Turn the client item into something that can be compared with the server item,
+ // and is also safe to mutate.
+ normalizeClientItem(item) {
+ return Cu.cloneInto(item, {});
+ }
+
+ // Turn the server item into something that can be easily compared with the client
+ // items.
+ async normalizeServerItem(item) {
+ return item;
+ }
+
+ // Return whether or not a server item should be present on the client. Expected
+ // to be overridden.
+ clientUnderstands(item) {
+ return true;
+ }
+
+ // Return whether or not a client item should be present on the server. Expected
+  // to be overridden.
+ async syncedByClient(item) {
+ return true;
+ }
+
+ // Compare the server item and the client item, and return a list of property
+ // names that are different. Can be overridden if needed.
+ getDifferences(client, server) {
+ let differences = [];
+ for (let prop of this.props) {
+ let clientProp = client[prop];
+ let serverProp = server[prop];
+ if ((clientProp || "") !== (serverProp || "")) {
+ differences.push(prop);
+ }
+ }
+ return differences;
+ }
+
+ // Returns an object containing
+ // problemData: an instance of the class returned by emptyProblemData(),
+ // clientRecords: Normalized client records
+ // records: Normalized server records,
+ // deletedRecords: Array of ids that were marked as deleted by the server.
+ async compareClientWithServer(clientItems, serverItems) {
+ const yieldState = lazy.Async.yieldState();
+
+ const clientRecords = [];
+
+ await lazy.Async.yieldingForEach(
+ clientItems,
+ item => {
+ clientRecords.push(this.normalizeClientItem(item));
+ },
+ yieldState
+ );
+
+ const serverRecords = [];
+ await lazy.Async.yieldingForEach(
+ serverItems,
+ async item => {
+ serverRecords.push(await this.normalizeServerItem(item));
+ },
+ yieldState
+ );
+
+ let problems = this.emptyProblemData();
+ let seenServer = new Map();
+ let serverDeleted = new Set();
+ let allRecords = new Map();
+
+ for (let record of serverRecords) {
+ let id = record[this.idProp];
+ if (!id) {
+ ++problems.missingIDs;
+ continue;
+ }
+ if (record.deleted) {
+ serverDeleted.add(record);
+ } else {
+ let serverHasPossibleDupe = seenServer.has(id);
+ if (serverHasPossibleDupe) {
+ problems.duplicates.push(id);
+ } else {
+ seenServer.set(id, record);
+ allRecords.set(id, { server: record, client: null });
+ }
+ record.understood = this.clientUnderstands(record);
+ }
+ }
+
+ let seenClient = new Map();
+ for (let record of clientRecords) {
+ let id = record[this.idProp];
+ record.shouldSync = await this.syncedByClient(record);
+ let clientHasPossibleDupe = seenClient.has(id);
+ if (clientHasPossibleDupe && record.shouldSync) {
+ // Only report duplicate client IDs for syncable records.
+ problems.clientDuplicates.push(id);
+ continue;
+ }
+ seenClient.set(id, record);
+ let combined = allRecords.get(id);
+ if (combined) {
+ combined.client = record;
+ } else {
+ allRecords.set(id, { client: record, server: null });
+ }
+ }
+
+ for (let [id, { server, client }] of allRecords) {
+ if (!client && !server) {
+ throw new Error("Impossible: no client or server record for " + id);
+ } else if (server && !client) {
+ if (!this.ignoresMissingClients && server.understood) {
+ problems.clientMissing.push(id);
+ }
+ } else if (client && !server) {
+ if (client.shouldSync) {
+ problems.serverMissing.push(id);
+ }
+ } else {
+ if (!client.shouldSync) {
+ if (!problems.serverUnexpected.includes(id)) {
+ problems.serverUnexpected.push(id);
+ }
+ continue;
+ }
+ let differences = this.getDifferences(client, server);
+ if (differences && differences.length) {
+ problems.differences.push({ id, differences });
+ }
+ }
+ }
+ return {
+ problemData: problems,
+ clientRecords,
+ records: serverRecords,
+ deletedRecords: [...serverDeleted],
+ };
+ }
+
+ async validate(engine) {
+ let start = Cu.now();
+ let clientItems = await this.getClientItems();
+ let serverItems = await this.getServerItems(engine);
+ let serverRecordCount = serverItems.length;
+ let result = await this.compareClientWithServer(clientItems, serverItems);
+ let end = Cu.now();
+ let duration = end - start;
+ engine._log.debug(`Validated ${this.name} in ${duration}ms`);
+ engine._log.debug(`Problem summary`);
+ for (let { name, count } of result.problemData.getSummary()) {
+ engine._log.debug(` ${name}: ${count}`);
+ }
+ return {
+ duration,
+ version: this.version,
+ problems: result.problemData,
+ recordCount: serverRecordCount,
+ };
+ }
+}
+
+// Default to 0, some engines may override.
+CollectionValidator.prototype.version = 0;
diff --git a/services/sync/modules/constants.sys.mjs b/services/sync/modules/constants.sys.mjs
new file mode 100644
index 0000000000..35c0ac2f0b
--- /dev/null
+++ b/services/sync/modules/constants.sys.mjs
@@ -0,0 +1,133 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Don't manually modify this line, as it is automatically replaced on merge day
+// by the gecko_migration.py script.
+export const WEAVE_VERSION = "1.126.0";
+
+// Sync Server API version that the client supports.
+export const SYNC_API_VERSION = "1.5";
+
+// Version of the data format this client supports. The data format describes
+// how records are packaged; this is separate from the Server API version and
+// the per-engine cleartext formats.
+export const STORAGE_VERSION = 5;
+export const PREFS_BRANCH = "services.sync.";
+
+// Wrapped in [] because brackets aren't allowed in a collection name.
+export const DEFAULT_KEYBUNDLE_NAME = "[default]";
+
+// Key dimensions.
+export const SYNC_KEY_ENCODED_LENGTH = 26;
+export const SYNC_KEY_DECODED_LENGTH = 16;
+
+export const NO_SYNC_NODE_INTERVAL = 10 * 60 * 1000; // 10 minutes
+
+export const MAX_ERROR_COUNT_BEFORE_BACKOFF = 3;
+
+// Backoff intervals
+export const MINIMUM_BACKOFF_INTERVAL = 15 * 60 * 1000; // 15 minutes
+export const MAXIMUM_BACKOFF_INTERVAL = 8 * 60 * 60 * 1000; // 8 hours
+
+// HMAC event handling timeout.
+// 10 minutes = a compromise between the multi-desktop sync interval
+// and the mobile sync interval.
+export const HMAC_EVENT_INTERVAL = 600000;
+
+// How long to wait between sync attempts if the Master Password is locked.
+export const MASTER_PASSWORD_LOCKED_RETRY_INTERVAL = 15 * 60 * 1000; // 15 minutes
+
+// 50 is hardcoded here because of URL length restrictions.
+// (GUIDs can be up to 64 chars long.)
+// Individual engines can set different values for their limit if their
+// identifiers are shorter.
+export const DEFAULT_GUID_FETCH_BATCH_SIZE = 50;
+
+// Default batch size for download batching
+// (how many records are fetched at a time from the server when batching is used).
+export const DEFAULT_DOWNLOAD_BATCH_SIZE = 1000;
+
+// score thresholds for early syncs
+export const SINGLE_USER_THRESHOLD = 1000;
+export const MULTI_DEVICE_THRESHOLD = 300;
+
+// Other score increment constants
+export const SCORE_INCREMENT_SMALL = 1;
+export const SCORE_INCREMENT_MEDIUM = 10;
+
+// Instant sync score increment
+export const SCORE_INCREMENT_XLARGE = 300 + 1; // MULTI_DEVICE_THRESHOLD + 1
+
+// Delay before incrementing global score
+export const SCORE_UPDATE_DELAY = 100;
+
+// Delay for the back observer debouncer. This is chosen to be longer than any
+// observed spurious idle/back events and short enough to pre-empt user activity.
+export const IDLE_OBSERVER_BACK_DELAY = 100;
+
+// Duplicate of URI_LENGTH_MAX from Places (nsNavHistory.h), used to discard
+// tabs with huge URIs during tab sync.
+export const URI_LENGTH_MAX = 65536;
+
+export const MAX_HISTORY_UPLOAD = 5000;
+export const MAX_HISTORY_DOWNLOAD = 5000;
+
+// Top-level statuses
+export const STATUS_OK = "success.status_ok";
+export const SYNC_FAILED = "error.sync.failed";
+export const LOGIN_FAILED = "error.login.failed";
+export const SYNC_FAILED_PARTIAL = "error.sync.failed_partial";
+export const CLIENT_NOT_CONFIGURED = "service.client_not_configured";
+export const STATUS_DISABLED = "service.disabled";
+export const MASTER_PASSWORD_LOCKED = "service.master_password_locked";
+
+// success states
+export const LOGIN_SUCCEEDED = "success.login";
+export const SYNC_SUCCEEDED = "success.sync";
+export const ENGINE_SUCCEEDED = "success.engine";
+
+// login failure status codes
+export const LOGIN_FAILED_NO_USERNAME = "error.login.reason.no_username";
+export const LOGIN_FAILED_NO_PASSPHRASE = "error.login.reason.no_recoverykey";
+export const LOGIN_FAILED_NETWORK_ERROR = "error.login.reason.network";
+export const LOGIN_FAILED_SERVER_ERROR = "error.login.reason.server";
+export const LOGIN_FAILED_INVALID_PASSPHRASE = "error.login.reason.recoverykey";
+export const LOGIN_FAILED_LOGIN_REJECTED = "error.login.reason.account";
+
+// sync failure status codes
+export const METARECORD_DOWNLOAD_FAIL =
+ "error.sync.reason.metarecord_download_fail";
+export const VERSION_OUT_OF_DATE = "error.sync.reason.version_out_of_date";
+export const CREDENTIALS_CHANGED = "error.sync.reason.credentials_changed";
+export const ABORT_SYNC_COMMAND = "aborting sync, process commands said so";
+export const NO_SYNC_NODE_FOUND = "error.sync.reason.no_node_found";
+export const OVER_QUOTA = "error.sync.reason.over_quota";
+export const SERVER_MAINTENANCE = "error.sync.reason.serverMaintenance";
+
+export const RESPONSE_OVER_QUOTA = "14";
+
+// engine failure status codes
+export const ENGINE_UPLOAD_FAIL = "error.engine.reason.record_upload_fail";
+export const ENGINE_DOWNLOAD_FAIL = "error.engine.reason.record_download_fail";
+export const ENGINE_UNKNOWN_FAIL = "error.engine.reason.unknown_fail";
+export const ENGINE_APPLY_FAIL = "error.engine.reason.apply_fail";
+// an upload failure where the batch was interrupted with a 412
+export const ENGINE_BATCH_INTERRUPTED = "error.engine.reason.batch_interrupted";
+
+// Ways that a sync can be disabled (messages only to be printed in debug log)
+export const kSyncMasterPasswordLocked =
+ "User elected to leave Primary Password locked";
+export const kSyncWeaveDisabled = "Weave is disabled";
+export const kSyncNetworkOffline = "Network is offline";
+export const kSyncBackoffNotMet =
+ "Trying to sync before the server said it's okay";
+export const kFirstSyncChoiceNotMade =
+ "User has not selected an action for first sync";
+export const kSyncNotConfigured = "Sync is not configured";
+export const kFirefoxShuttingDown = "Firefox is about to shut down";
+
+export const DEVICE_TYPE_DESKTOP = "desktop";
+export const DEVICE_TYPE_MOBILE = "mobile";
+
+export const SQLITE_MAX_VARIABLE_NUMBER = 999;
diff --git a/services/sync/modules/doctor.sys.mjs b/services/sync/modules/doctor.sys.mjs
new file mode 100644
index 0000000000..ebcf38e2a2
--- /dev/null
+++ b/services/sync/modules/doctor.sys.mjs
@@ -0,0 +1,201 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// A doctor for our collections. She can be asked to make a consultation, and
+// may just diagnose an issue without attempting to cure it, may diagnose and
+// attempt to cure, or may decide she is overworked and underpaid.
+// Or something - naming is hard :)
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+import { Observers } from "resource://services-common/observers.sys.mjs";
+import { Service } from "resource://services-sync/service.sys.mjs";
+import { Resource } from "resource://services-sync/resource.sys.mjs";
+import { Svc } from "resource://services-sync/util.sys.mjs";
+
+const log = Log.repository.getLogger("Sync.Doctor");
+
+export var Doctor = {
+ async consult(recentlySyncedEngines) {
+ if (!Services.telemetry.canRecordBase) {
+ log.info("Skipping consultation: telemetry reporting is disabled");
+ return;
+ }
+
+ let engineInfos = this._getEnginesToValidate(recentlySyncedEngines);
+
+ await this._runValidators(engineInfos);
+ },
+
+ _getEnginesToValidate(recentlySyncedEngines) {
+ let result = {};
+ for (let e of recentlySyncedEngines) {
+ let prefPrefix = `engine.${e.name}.`;
+ if (
+ !Svc.PrefBranch.getBoolPref(prefPrefix + "validation.enabled", false)
+ ) {
+ log.info(`Skipping check of ${e.name} - disabled via preferences`);
+ continue;
+ }
+ // Check the last validation time for the engine.
+ let lastValidation = Svc.PrefBranch.getIntPref(
+ prefPrefix + "validation.lastTime",
+ 0
+ );
+ let validationInterval = Svc.PrefBranch.getIntPref(
+ prefPrefix + "validation.interval"
+ );
+ let nowSeconds = this._now();
+
+ if (nowSeconds - lastValidation < validationInterval) {
+ log.info(
+        `Skipping validation of ${e.name}: not enough time since the last validation attempt`
+ );
+ continue;
+ }
+ // Update the time now, even if we decline to actually perform a
+ // validation. We don't want to check the rest of these more frequently
+ // than once a day.
+ Svc.PrefBranch.setIntPref(
+ prefPrefix + "validation.lastTime",
+ Math.floor(nowSeconds)
+ );
+
+ // Validation only occurs a certain percentage of the time.
+ let validationProbability =
+ Svc.PrefBranch.getIntPref(
+ prefPrefix + "validation.percentageChance",
+ 0
+ ) / 100.0;
+ if (validationProbability < Math.random()) {
+ log.info(
+ `Skipping validation of ${e.name}: Probability threshold not met`
+ );
+ continue;
+ }
+
+ let maxRecords = Svc.PrefBranch.getIntPref(
+ prefPrefix + "validation.maxRecords"
+ );
+ if (!maxRecords) {
+ log.info(`Skipping validation of ${e.name}: No maxRecords specified`);
+ continue;
+ }
+ // OK, so this is a candidate - the final decision will be based on the
+ // number of records actually found.
+ result[e.name] = { engine: e, maxRecords };
+ }
+ return result;
+ },
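+
+  // For reference, the prefs consulted above live under the "services.sync."
+  // branch. A minimal sketch of enabling validation for one engine (the
+  // values are illustrative, not recommendations):
+  //
+  //   services.sync.engine.bookmarks.validation.enabled = true
+  //   services.sync.engine.bookmarks.validation.interval = 86400 (seconds)
+  //   services.sync.engine.bookmarks.validation.percentageChance = 10
+  //   services.sync.engine.bookmarks.validation.maxRecords = 1000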
+
+ async _runValidators(engineInfos) {
+ if (!Object.keys(engineInfos).length) {
+ log.info("Skipping validation: no engines qualify");
+ return;
+ }
+
+ if (Object.values(engineInfos).filter(i => i.maxRecords != -1).length) {
+ // at least some of the engines have maxRecord restrictions which require
+ // us to ask the server for the counts.
+ let countInfo = await this._fetchCollectionCounts();
+ for (let [engineName, recordCount] of Object.entries(countInfo)) {
+ if (engineName in engineInfos) {
+ engineInfos[engineName].recordCount = recordCount;
+ }
+ }
+ }
+
+ for (let [
+ engineName,
+ { engine, maxRecords, recordCount },
+ ] of Object.entries(engineInfos)) {
+ // maxRecords of -1 means "any number", so we can skip asking the server.
+ // Used for tests.
+ if (maxRecords >= 0 && recordCount > maxRecords) {
+ log.debug(
+ `Skipping validation for ${engineName} because ` +
+ `the number of records (${recordCount}) is greater ` +
+ `than the maximum allowed (${maxRecords}).`
+ );
+ continue;
+ }
+ let validator = engine.getValidator();
+ if (!validator) {
+ // This is probably only possible in profile downgrade cases.
+ log.warn(
+ `engine.getValidator returned null for ${engineName} but the pref that controls validation is enabled.`
+ );
+ continue;
+ }
+
+ if (!(await validator.canValidate())) {
+ log.debug(
+ `Skipping validation for ${engineName} because validator.canValidate() is false`
+ );
+ continue;
+ }
+
+ // Let's do it!
+ Services.console.logStringMessage(
+ `Sync is about to run a consistency check of ${engine.name}. This may be slow, and ` +
+ `can be controlled using the pref "services.sync.${engine.name}.validation.enabled".\n` +
+ `If you encounter any problems because of this, please file a bug.`
+ );
+
+ try {
+ log.info(`Running validator for ${engine.name}`);
+ let result = await validator.validate(engine);
+ let { problems, version, duration, recordCount } = result;
+ Observers.notify(
+ "weave:engine:validate:finish",
+ {
+ version,
+ checked: recordCount,
+ took: duration,
+ problems: problems ? problems.getSummary(true) : null,
+ },
+ engine.name
+ );
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ log.error(`Failed to run validation on ${engine.name}!`, ex);
+ Observers.notify("weave:engine:validate:error", ex, engine.name);
+ // Keep validating -- there's no reason to think that a failure for one
+ // validator would mean the others will fail.
+ }
+ }
+ },
+
+ // mainly for mocking.
+ async _fetchCollectionCounts() {
+ let collectionCountsURL = Service.userBaseURL + "info/collection_counts";
+ try {
+ let infoResp = await Service._fetchInfo(collectionCountsURL);
+ if (!infoResp.success) {
+ log.error(
+ "Can't fetch collection counts: request to info/collection_counts responded with " +
+ infoResp.status
+ );
+ return {};
+ }
+ return infoResp.obj; // might throw because obj is a getter which parses json.
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ // Not running validation is totally fine, so we just write an error log and return.
+ log.error("Caught error when fetching counts", ex);
+ return {};
+ }
+ },
+
+ // functions used so tests can mock them
+ _now() {
+ // We use the server time, which is SECONDS
+ return Resource.serverTime;
+ },
+};
diff --git a/services/sync/modules/engines.sys.mjs b/services/sync/modules/engines.sys.mjs
new file mode 100644
index 0000000000..0d490ac4b3
--- /dev/null
+++ b/services/sync/modules/engines.sys.mjs
@@ -0,0 +1,2274 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+import { JSONFile } from "resource://gre/modules/JSONFile.sys.mjs";
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+import { Observers } from "resource://services-common/observers.sys.mjs";
+
+import {
+ DEFAULT_DOWNLOAD_BATCH_SIZE,
+ DEFAULT_GUID_FETCH_BATCH_SIZE,
+ ENGINE_BATCH_INTERRUPTED,
+ ENGINE_DOWNLOAD_FAIL,
+ ENGINE_UPLOAD_FAIL,
+ VERSION_OUT_OF_DATE,
+ PREFS_BRANCH,
+} from "resource://services-sync/constants.sys.mjs";
+
+import {
+ Collection,
+ CryptoWrapper,
+} from "resource://services-sync/record.sys.mjs";
+import { Resource } from "resource://services-sync/resource.sys.mjs";
+import {
+ SerializableSet,
+ Svc,
+ Utils,
+} from "resource://services-sync/util.sys.mjs";
+import { SyncedRecordsTelemetry } from "resource://services-sync/telemetry.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ PlacesUtils: "resource://gre/modules/PlacesUtils.sys.mjs",
+});
+
+function ensureDirectory(path) {
+ return IOUtils.makeDirectory(PathUtils.parent(path), {
+ createAncestors: true,
+ });
+}
+
+/**
+ * Trackers are associated with a single engine and deal with
+ * listening for changes to their particular data type.
+ *
+ * The base `Tracker` only supports listening for changes, and bumping the score
+ * to indicate how urgently the engine wants to sync. It does not persist any
+ * data. Engines that track changes directly in the storage layer (like
+ * bookmarks, bridged engines, addresses, and credit cards) or only upload a
+ * single record (tabs and preferences) should subclass `Tracker`.
+ */
+export function Tracker(name, engine) {
+ if (!engine) {
+ throw new Error("Tracker must be associated with an Engine instance.");
+ }
+
+ name = name || "Unnamed";
+ this.name = name.toLowerCase();
+ this.engine = engine;
+
+ this._log = Log.repository.getLogger(`Sync.Engine.${name}.Tracker`);
+
+ this._score = 0;
+
+ this.asyncObserver = Async.asyncObserver(this, this._log);
+}
+
+Tracker.prototype = {
+ // New-style trackers use change sources to filter out changes made by Sync in
+ // observer notifications, so we don't want to let the engine ignore all
+ // changes during a sync.
+ get ignoreAll() {
+ return false;
+ },
+
+ // Define an empty setter so that the engine doesn't throw a `TypeError`
+ // setting a read-only property.
+ set ignoreAll(value) {},
+
+ /*
+   * Score can be queried as often as desired to decide which engines to sync
+ *
+ * Valid values for score:
+ * -1: Do not sync unless the user specifically requests it (almost disabled)
+ * 0: Nothing has changed
+ * 100: Please sync me ASAP!
+ *
+ * Setting it to other values should (but doesn't currently) throw an exception
+ */
+ get score() {
+ return this._score;
+ },
+
+ set score(value) {
+ this._score = value;
+ Observers.notify("weave:engine:score:updated", this.name);
+ },
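+
+  // As a sketch (not part of the base API): subclasses typically nudge the
+  // score from their change observers using the increments defined in
+  // constants.sys.mjs, and let the scheduler decide when to sync, e.g.:
+  //
+  //   this.score += SCORE_INCREMENT_MEDIUM; // an ordinary change
+  //   this.score += SCORE_INCREMENT_XLARGE; // sync as soon as possible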
+
+  // Should be called by the service every time a sync has been done for an engine.
+ resetScore() {
+ this._score = 0;
+ },
+
+ // Unsupported, and throws a more descriptive error to ensure callers aren't
+ // accidentally using persistence.
+ async getChangedIDs() {
+ throw new TypeError("This tracker doesn't store changed IDs");
+ },
+
+ // Also unsupported.
+ async addChangedID(id, when) {
+ throw new TypeError("Can't add changed ID to this tracker");
+ },
+
+ // Ditto.
+ async removeChangedID(...ids) {
+ throw new TypeError("Can't remove changed IDs from this tracker");
+ },
+
+ // This method is called at various times, so we override with a no-op
+ // instead of throwing.
+ clearChangedIDs() {},
+
+ _now() {
+ return Date.now() / 1000;
+ },
+
+ _isTracking: false,
+
+ start() {
+ if (!this.engineIsEnabled()) {
+ return;
+ }
+ this._log.trace("start().");
+ if (!this._isTracking) {
+ this.onStart();
+ this._isTracking = true;
+ }
+ },
+
+ async stop() {
+ this._log.trace("stop().");
+ if (this._isTracking) {
+ await this.asyncObserver.promiseObserversComplete();
+ this.onStop();
+ this._isTracking = false;
+ }
+ },
+
+ // Override these in your subclasses.
+ onStart() {},
+ onStop() {},
+ async observe(subject, topic, data) {},
+
+ engineIsEnabled() {
+ if (!this.engine) {
+ // Can't tell -- we must be running in a test!
+ return true;
+ }
+ return this.engine.enabled;
+ },
+
+ /**
+ * Starts or stops listening for changes depending on the associated engine's
+ * enabled state.
+ *
+ * @param {Boolean} engineEnabled Whether the engine was enabled.
+ */
+ async onEngineEnabledChanged(engineEnabled) {
+ if (engineEnabled == this._isTracking) {
+ return;
+ }
+
+ if (engineEnabled) {
+ this.start();
+ } else {
+ await this.stop();
+ this.clearChangedIDs();
+ }
+ },
+
+ async finalize() {
+ await this.stop();
+ },
+};
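+
+// A minimal sketch of a Tracker subclass (the topic, class name, and score
+// value are illustrative, not taken from a real engine): start and stop an
+// observer together with tracking, and bump the score when the data changes.
+//
+//   class ThingsTracker extends Tracker {
+//     onStart() {
+//       Services.obs.addObserver(this.asyncObserver, "things-changed");
+//     }
+//     onStop() {
+//       Services.obs.removeObserver(this.asyncObserver, "things-changed");
+//     }
+//     async observe(subject, topic, data) {
+//       if (topic == "things-changed") {
+//         this.score += SCORE_INCREMENT_MEDIUM;
+//       }
+//     }
+//   }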
+
+/*
+ * A tracker that persists a list of IDs for all changed items that need to be
+ * synced. This is 🚨 _extremely deprecated_ 🚨 and only kept around for current
+ * engines. ⚠️ Please **don't use it** for new engines! ⚠️
+ *
+ * Why is this kind of external change tracking deprecated? Because it causes
+ * consistency issues due to missed notifications, interrupted syncs, and the
+ * tracker's view of what changed diverging from the data store's.
+ */
+export function LegacyTracker(name, engine) {
+ Tracker.call(this, name, engine);
+
+ this._ignored = [];
+ this.file = this.name;
+ this._storage = new JSONFile({
+ path: Utils.jsonFilePath("changes", this.file),
+ dataPostProcessor: json => this._dataPostProcessor(json),
+ beforeSave: () => this._beforeSave(),
+ });
+ this._ignoreAll = false;
+}
+
+LegacyTracker.prototype = {
+ get ignoreAll() {
+ return this._ignoreAll;
+ },
+
+ set ignoreAll(value) {
+ this._ignoreAll = value;
+ },
+
+ // Default to an empty object if the file doesn't exist.
+ _dataPostProcessor(json) {
+ return (typeof json == "object" && json) || {};
+ },
+
+ // Ensure the Weave storage directory exists before writing the file.
+ _beforeSave() {
+ return ensureDirectory(this._storage.path);
+ },
+
+ async getChangedIDs() {
+ await this._storage.load();
+ return this._storage.data;
+ },
+
+ _saveChangedIDs() {
+ this._storage.saveSoon();
+ },
+
+ // ignore/unignore specific IDs. Useful for ignoring items that are
+ // being processed, or that shouldn't be synced.
+ // But note: not persisted to disk
+
+ ignoreID(id) {
+ this.unignoreID(id);
+ this._ignored.push(id);
+ },
+
+ unignoreID(id) {
+ let index = this._ignored.indexOf(id);
+ if (index != -1) {
+ this._ignored.splice(index, 1);
+ }
+ },
+
+ async _saveChangedID(id, when) {
+ this._log.trace(`Adding changed ID: ${id}, ${JSON.stringify(when)}`);
+ const changedIDs = await this.getChangedIDs();
+ changedIDs[id] = when;
+ this._saveChangedIDs();
+ },
+
+ async addChangedID(id, when) {
+ if (!id) {
+ this._log.warn("Attempted to add undefined ID to tracker");
+ return false;
+ }
+
+ if (this.ignoreAll || this._ignored.includes(id)) {
+ return false;
+ }
+
+ // Default to the current time in seconds if no time is provided.
+ if (when == null) {
+ when = this._now();
+ }
+
+ const changedIDs = await this.getChangedIDs();
+ // Add/update the entry if we have a newer time.
+ if ((changedIDs[id] || -Infinity) < when) {
+ await this._saveChangedID(id, when);
+ }
+
+ return true;
+ },
+
+ async removeChangedID(...ids) {
+ if (!ids.length || this.ignoreAll) {
+ return false;
+ }
+ for (let id of ids) {
+ if (!id) {
+ this._log.warn("Attempted to remove undefined ID from tracker");
+ continue;
+ }
+ if (this._ignored.includes(id)) {
+ this._log.debug(`Not removing ignored ID ${id} from tracker`);
+ continue;
+ }
+ const changedIDs = await this.getChangedIDs();
+ if (changedIDs[id] != null) {
+ this._log.trace("Removing changed ID " + id);
+ delete changedIDs[id];
+ }
+ }
+ this._saveChangedIDs();
+ return true;
+ },
+
+ clearChangedIDs() {
+ this._log.trace("Clearing changed ID list");
+ this._storage.data = {};
+ this._saveChangedIDs();
+ },
+
+ async finalize() {
+ // Persist all pending tracked changes to disk, and wait for the final write
+ // to finish.
+ await super.finalize();
+ this._saveChangedIDs();
+ await this._storage.finalize();
+ },
+};
+Object.setPrototypeOf(LegacyTracker.prototype, Tracker.prototype);
+
+/**
+ * The Store serves as the interface between Sync and stored data.
+ *
+ * The name "store" is a slight misnomer because it doesn't actually "store"
+ * anything. Instead, it serves as a gateway to something that actually does
+ * the "storing."
+ *
+ * The store is responsible for record management inside an engine. It tells
+ * Sync what items are available for Sync, converts items to and from Sync's
+ * record format, and applies records from Sync into changes on the underlying
+ * store.
+ *
+ * Store implementations require a number of functions to be implemented. These
+ * are all documented below.
+ *
+ * For stores that deal with many records or which have expensive store access
+ * routines, it is highly recommended to implement a custom applyIncomingBatch
+ * and/or applyIncoming function on top of the basic APIs.
+ */
+
+export function Store(name, engine) {
+ if (!engine) {
+ throw new Error("Store must be associated with an Engine instance.");
+ }
+
+ name = name || "Unnamed";
+ this.name = name.toLowerCase();
+ this.engine = engine;
+
+ this._log = Log.repository.getLogger(`Sync.Engine.${name}.Store`);
+
+ ChromeUtils.defineLazyGetter(this, "_timer", function () {
+ return Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
+ });
+}
+
+Store.prototype = {
+ /**
+ * Apply multiple incoming records against the store.
+ *
+ * This is called with a set of incoming records to process. The function
+ * should look at each record, reconcile with the current local state, and
+ * make the local changes required to bring its state in alignment with the
+ * record.
+ *
+ * The default implementation simply iterates over all records and calls
+ * applyIncoming(). Store implementations may overwrite this function
+ * if desired.
+ *
+ * @param records Array of records to apply
+   * @param countTelemetry A SyncedRecordsTelemetry object that keeps track of
+   *        the reasons any records failed
+ * @return Array of record IDs which did not apply cleanly
+ */
+ async applyIncomingBatch(records, countTelemetry) {
+ let failed = [];
+
+ await Async.yieldingForEach(records, async record => {
+ try {
+ await this.applyIncoming(record);
+ } catch (ex) {
+ if (ex.code == SyncEngine.prototype.eEngineAbortApplyIncoming) {
+ // This kind of exception should have a 'cause' attribute, which is an
+ // originating exception.
+ // ex.cause will carry its stack with it when rethrown.
+ throw ex.cause;
+ }
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.warn("Failed to apply incoming record " + record.id, ex);
+ failed.push(record.id);
+ countTelemetry.addIncomingFailedReason(ex.message);
+ }
+ });
+
+ return failed;
+ },
+
+ /**
+ * Apply a single record against the store.
+ *
+ * This takes a single record and makes the local changes required so the
+ * local state matches what's in the record.
+ *
+ * The default implementation calls one of remove(), create(), or update()
+ * depending on the state obtained from the store itself. Store
+ * implementations may overwrite this function if desired.
+ *
+ * @param record
+ * Record to apply
+ */
+ async applyIncoming(record) {
+ if (record.deleted) {
+ await this.remove(record);
+ } else if (!(await this.itemExists(record.id))) {
+ await this.create(record);
+ } else {
+ await this.update(record);
+ }
+ },
+
+ // override these in derived objects
+
+ /**
+ * Create an item in the store from a record.
+ *
+ * This is called by the default implementation of applyIncoming(). If using
+ * applyIncomingBatch(), this won't be called unless your store calls it.
+ *
+ * @param record
+ * The store record to create an item from
+ */
+ async create(record) {
+ throw new Error("override create in a subclass");
+ },
+
+ /**
+ * Remove an item in the store from a record.
+ *
+ * This is called by the default implementation of applyIncoming(). If using
+ * applyIncomingBatch(), this won't be called unless your store calls it.
+ *
+ * @param record
+ * The store record to delete an item from
+ */
+ async remove(record) {
+ throw new Error("override remove in a subclass");
+ },
+
+ /**
+ * Update an item from a record.
+ *
+ * This is called by the default implementation of applyIncoming(). If using
+ * applyIncomingBatch(), this won't be called unless your store calls it.
+ *
+ * @param record
+ * The record to use to update an item from
+ */
+ async update(record) {
+ throw new Error("override update in a subclass");
+ },
+
+ /**
+ * Determine whether a record with the specified ID exists.
+ *
+   * Takes a string record ID and returns a boolean saying whether the record
+ * exists.
+ *
+ * @param id
+ * string record ID
+ * @return boolean indicating whether record exists locally
+ */
+ async itemExists(id) {
+ throw new Error("override itemExists in a subclass");
+ },
+
+ /**
+ * Create a record from the specified ID.
+ *
+ * If the ID is known, the record should be populated with metadata from
+ * the store. If the ID is not known, the record should be created with the
+ * delete field set to true.
+ *
+ * @param id
+ * string record ID
+ * @param collection
+ * Collection to add record to. This is typically passed into the
+ * constructor for the newly-created record.
+ * @return record type for this engine
+ */
+ async createRecord(id, collection) {
+ throw new Error("override createRecord in a subclass");
+ },
+
+ /**
+ * Change the ID of a record.
+ *
+ * @param oldID
+ * string old/current record ID
+ * @param newID
+ * string new record ID
+ */
+ async changeItemID(oldID, newID) {
+ throw new Error("override changeItemID in a subclass");
+ },
+
+ /**
+ * Obtain the set of all known record IDs.
+ *
+ * @return Object with ID strings as keys and values of true. The values
+ * are ignored.
+ */
+ async getAllIDs() {
+ throw new Error("override getAllIDs in a subclass");
+ },
+
+ /**
+ * Wipe all data in the store.
+ *
+ * This function is called during remote wipes or when replacing local data
+ * with remote data.
+ *
+ * This function should delete all local data that the store is managing. It
+ * can be thought of as clearing out all state and restoring the "new
+ * browser" state.
+ */
+ async wipe() {
+ throw new Error("override wipe in a subclass");
+ },
+};
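+
+// A minimal in-memory Store sketch (illustrative only; real stores wrap an
+// actual storage backend such as Places): it implements just the primitives
+// that the default applyIncoming() needs, plus getAllIDs() and wipe().
+//
+//   class MemoryStore extends Store {
+//     constructor(name, engine) {
+//       super(name, engine);
+//       this._items = new Map();
+//     }
+//     async itemExists(id) {
+//       return this._items.has(id);
+//     }
+//     async create(record) {
+//       this._items.set(record.id, record.cleartext);
+//     }
+//     async update(record) {
+//       this._items.set(record.id, record.cleartext);
+//     }
+//     async remove(record) {
+//       this._items.delete(record.id);
+//     }
+//     async getAllIDs() {
+//       let ids = {};
+//       for (let id of this._items.keys()) {
+//         ids[id] = true;
+//       }
+//       return ids;
+//     }
+//     async wipe() {
+//       this._items.clear();
+//     }
+//   }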
+
+export function EngineManager(service) {
+ this.service = service;
+
+ this._engines = {};
+
+ this._altEngineInfo = {};
+
+ // This will be populated by Service on startup.
+ this._declined = new Set();
+ this._log = Log.repository.getLogger("Sync.EngineManager");
+ this._log.manageLevelFromPref("services.sync.log.logger.service.engines");
+ // define the default level for all engine logs here (although each engine
+ // allows its level to be controlled via a specific, non-default pref)
+ Log.repository
+ .getLogger(`Sync.Engine`)
+ .manageLevelFromPref("services.sync.log.logger.engine");
+}
+
+EngineManager.prototype = {
+ get(name) {
+ // Return an array of engines if we have an array of names
+ if (Array.isArray(name)) {
+ let engines = [];
+ name.forEach(function (name) {
+ let engine = this.get(name);
+ if (engine) {
+ engines.push(engine);
+ }
+ }, this);
+ return engines;
+ }
+
+ return this._engines[name]; // Silently returns undefined for unknown names.
+ },
+
+ getAll() {
+ let engines = [];
+ for (let [, engine] of Object.entries(this._engines)) {
+ engines.push(engine);
+ }
+ return engines;
+ },
+
+ /**
+   * If a user has changed a pref that controls which variant of a sync engine
+   * we use for a given collection, unregister the old engine and register the
+   * new one.
+ *
+ * This is called by EngineSynchronizer before every sync.
+ */
+ async switchAlternatives() {
+ for (let [name, info] of Object.entries(this._altEngineInfo)) {
+ let prefValue = info.prefValue;
+ if (prefValue === info.lastValue) {
+ this._log.trace(
+ `No change for engine ${name} (${info.pref} is still ${prefValue})`
+ );
+ continue;
+ }
+ // Unregister the old engine, register the new one.
+ this._log.info(
+ `Switching ${name} engine ("${info.pref}" went from ${info.lastValue} => ${prefValue})`
+ );
+ try {
+ await this._removeAndFinalize(name);
+ } catch (e) {
+ this._log.warn(`Failed to remove previous ${name} engine...`, e);
+ }
+ let engineType = prefValue ? info.whenTrue : info.whenFalse;
+ try {
+ // If register throws, we'll try again next sync, but until then there
+ // won't be an engine registered for this collection.
+ await this.register(engineType);
+ info.lastValue = prefValue;
+ // Note: engineType.name is using Function.prototype.name.
+ this._log.info(`Switched the ${name} engine to use ${engineType.name}`);
+ } catch (e) {
+ this._log.warn(
+ `Switching the ${name} engine to use ${engineType.name} failed (couldn't register)`,
+ e
+ );
+ }
+ }
+ },
+
+ async registerAlternatives(name, pref, whenTrue, whenFalse) {
+ let info = { name, pref, whenTrue, whenFalse };
+
+ XPCOMUtils.defineLazyPreferenceGetter(info, "prefValue", pref, false);
+
+ let chosen = info.prefValue ? info.whenTrue : info.whenFalse;
+ info.lastValue = info.prefValue;
+ this._altEngineInfo[name] = info;
+
+ await this.register(chosen);
+ },
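+
+  // For example (the engine names and pref are hypothetical):
+  //
+  //   await engineManager.registerAlternatives(
+  //     "things",
+  //     "services.sync.engine.things.use-new-impl",
+  //     NewThingsEngine, // registered while the pref is true
+  //     OldThingsEngine // registered while the pref is false
+  //   );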
+
+ /**
+ * N.B., does not pay attention to the declined list.
+ */
+ getEnabled() {
+ return this.getAll()
+ .filter(engine => engine.enabled)
+ .sort((a, b) => a.syncPriority - b.syncPriority);
+ },
+
+ get enabledEngineNames() {
+ return this.getEnabled().map(e => e.name);
+ },
+
+ persistDeclined() {
+ Svc.PrefBranch.setStringPref(
+ "declinedEngines",
+ [...this._declined].join(",")
+ );
+ },
+
+ /**
+ * Returns an array.
+ */
+ getDeclined() {
+ return [...this._declined];
+ },
+
+ setDeclined(engines) {
+ this._declined = new Set(engines);
+ this.persistDeclined();
+ },
+
+ isDeclined(engineName) {
+ return this._declined.has(engineName);
+ },
+
+ /**
+ * Accepts a Set or an array.
+ */
+ decline(engines) {
+ for (let e of engines) {
+ this._declined.add(e);
+ }
+ this.persistDeclined();
+ },
+
+ undecline(engines) {
+ for (let e of engines) {
+ this._declined.delete(e);
+ }
+ this.persistDeclined();
+ },
+
+ /**
+   * Register an Engine with the service. Alternatively, give an array of
+   * engine constructors to register.
+   *
+   * @param engineObject
+   *        Engine constructor used to get an instance of the engine.
+   *        Initialization failures are logged rather than thrown.
+ */
+ async register(engineObject) {
+ if (Array.isArray(engineObject)) {
+ for (const e of engineObject) {
+ await this.register(e);
+ }
+ return;
+ }
+
+ try {
+ let engine = new engineObject(this.service);
+ let name = engine.name;
+ if (name in this._engines) {
+ this._log.error("Engine '" + name + "' is already registered!");
+ } else {
+ if (engine.initialize) {
+ await engine.initialize();
+ }
+ this._engines[name] = engine;
+ }
+ } catch (ex) {
+ let name = engineObject || "";
+ name = name.prototype || "";
+ name = name.name || "";
+
+ this._log.error(`Could not initialize engine ${name}`, ex);
+ }
+ },
+
+ async unregister(val) {
+ let name = val;
+ if (val instanceof SyncEngine) {
+ name = val.name;
+ }
+ await this._removeAndFinalize(name);
+ delete this._altEngineInfo[name];
+ },
+
+ // Common code for disabling an engine by name, that doesn't complain if the
+ // engine doesn't exist. Doesn't touch the engine's alternative info (if any
+ // exists).
+ async _removeAndFinalize(name) {
+ if (name in this._engines) {
+ let engine = this._engines[name];
+ delete this._engines[name];
+ await engine.finalize();
+ }
+ },
+
+ async clear() {
+ for (let name in this._engines) {
+ let engine = this._engines[name];
+ delete this._engines[name];
+ await engine.finalize();
+ }
+ this._altEngineInfo = {};
+ },
+};
+
+export function SyncEngine(name, service) {
+ if (!service) {
+ throw new Error("SyncEngine must be associated with a Service instance.");
+ }
+
+ this.Name = name || "Unnamed";
+ this.name = name.toLowerCase();
+ this.service = service;
+
+ this._notify = Utils.notify("weave:engine:");
+ this._log = Log.repository.getLogger("Sync.Engine." + this.Name);
+ this._log.manageLevelFromPref(`services.sync.log.logger.engine.${this.name}`);
+
+ this._modified = this.emptyChangeset();
+ this._tracker; // initialize tracker to load previously changed IDs
+ this._log.debug("Engine constructed");
+
+ this._toFetchStorage = new JSONFile({
+ path: Utils.jsonFilePath("toFetch", this.name),
+ dataPostProcessor: json => this._metadataPostProcessor(json),
+ beforeSave: () => this._beforeSaveMetadata(),
+ });
+
+ this._previousFailedStorage = new JSONFile({
+ path: Utils.jsonFilePath("failed", this.name),
+ dataPostProcessor: json => this._metadataPostProcessor(json),
+ beforeSave: () => this._beforeSaveMetadata(),
+ });
+
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_enabled",
+ `services.sync.engine.${this.prefName}`,
+ false
+ );
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_syncID",
+ `services.sync.${this.name}.syncID`,
+ ""
+ );
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_lastSync",
+ `services.sync.${this.name}.lastSync`,
+ "0",
+ null,
+ v => parseFloat(v)
+ );
+ // Async initializations can be made in the initialize() method.
+
+ this.asyncObserver = Async.asyncObserver(this, this._log);
+}
+
+// Enumeration to define approaches to handling bad records.
+// Attached to the constructor to allow use as a kind of static enumeration.
+SyncEngine.kRecoveryStrategy = {
+ ignore: "ignore",
+ retry: "retry",
+ error: "error",
+};
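+
+// A sketch of how these are consumed (this override is illustrative; see the
+// HMAC handling in _maybeReconcile below): retry once after keys may have
+// been refreshed, then give up with an error.
+//
+//   async handleHMACMismatch(item, mayRetry) {
+//     return mayRetry
+//       ? SyncEngine.kRecoveryStrategy.retry
+//       : SyncEngine.kRecoveryStrategy.error;
+//   }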
+
+SyncEngine.prototype = {
+ _recordObj: CryptoWrapper,
+  // _storeObj and _trackerObj should be overridden in subclasses.
+ _storeObj: Store,
+ _trackerObj: Tracker,
+ version: 1,
+
+ // Local 'constant'.
+ // Signal to the engine that processing further records is pointless.
+ eEngineAbortApplyIncoming: "error.engine.abort.applyincoming",
+
+ // Should we keep syncing if we find a record that cannot be uploaded (ever)?
+  // If this is false, we'll throw; otherwise, we'll ignore the record and
+ // continue. This currently can only happen due to the record being larger
+ // than the record upload limit.
+ allowSkippedRecord: true,
+
+ // Which sortindex to use when retrieving records for this engine.
+ _defaultSort: undefined,
+
+ _hasSyncedThisSession: false,
+
+ _metadataPostProcessor(json) {
+ if (Array.isArray(json)) {
+ // Pre-`JSONFile` storage stored an array, but `JSONFile` defaults to
+ // an object, so we wrap the array for consistency.
+ json = { ids: json };
+ }
+ if (!json.ids) {
+ json.ids = [];
+ }
+ // The set serializes the same way as an array, but offers more efficient
+ // methods of manipulation.
+ json.ids = new SerializableSet(json.ids);
+ return json;
+ },
+
+ async _beforeSaveMetadata() {
+ await ensureDirectory(this._toFetchStorage.path);
+ await ensureDirectory(this._previousFailedStorage.path);
+ },
+
+ // A relative priority to use when computing an order
+ // for engines to be synced. Higher-priority engines
+ // (lower numbers) are synced first.
+ // It is recommended that a unique value be used for each engine,
+ // in order to guarantee a stable sequence.
+ syncPriority: 0,
+
+ // How many records to pull in a single sync. This is primarily to avoid very
+ // long first syncs against profiles with many history records.
+ downloadLimit: null,
+
+ // How many records to pull at one time when specifying IDs. This is to avoid
+ // URI length limitations.
+ guidFetchBatchSize: DEFAULT_GUID_FETCH_BATCH_SIZE,
+
+ downloadBatchSize: DEFAULT_DOWNLOAD_BATCH_SIZE,
+
+ async initialize() {
+ await this._toFetchStorage.load();
+ await this._previousFailedStorage.load();
+ Services.prefs.addObserver(
+ `${PREFS_BRANCH}engine.${this.prefName}`,
+ this.asyncObserver,
+ true
+ );
+ this._log.debug("SyncEngine initialized", this.name);
+ },
+
+ get prefName() {
+ return this.name;
+ },
+
+ get enabled() {
+ return this._enabled;
+ },
+
+ set enabled(val) {
+ if (!!val != this._enabled) {
+ Svc.PrefBranch.setBoolPref("engine." + this.prefName, !!val);
+ }
+ },
+
+ get score() {
+ return this._tracker.score;
+ },
+
+ get _store() {
+ let store = new this._storeObj(this.Name, this);
+ this.__defineGetter__("_store", () => store);
+ return store;
+ },
+
+ get _tracker() {
+ let tracker = new this._trackerObj(this.Name, this);
+ this.__defineGetter__("_tracker", () => tracker);
+ return tracker;
+ },
+
+ get storageURL() {
+ return this.service.storageURL;
+ },
+
+ get engineURL() {
+ return this.storageURL + this.name;
+ },
+
+ get cryptoKeysURL() {
+ return this.storageURL + "crypto/keys";
+ },
+
+ get metaURL() {
+ return this.storageURL + "meta/global";
+ },
+
+ startTracking() {
+ this._tracker.start();
+ },
+
+ // Returns a promise
+ stopTracking() {
+ return this._tracker.stop();
+ },
+
+ // Listens for engine enabled state changes, and updates the tracker's state.
+ // This is an async observer because the tracker waits on all its async
+ // observers to finish when it's stopped.
+ async observe(subject, topic, data) {
+ if (
+ topic == "nsPref:changed" &&
+ data == `services.sync.engine.${this.prefName}`
+ ) {
+ await this._tracker.onEngineEnabledChanged(this._enabled);
+ }
+ },
+
+ async sync() {
+ if (!this.enabled) {
+ return false;
+ }
+
+ if (!this._sync) {
+ throw new Error("engine does not implement _sync method");
+ }
+
+ return this._notify("sync", this.name, this._sync)();
+ },
+
+ // Override this method to return a new changeset type.
+ emptyChangeset() {
+ return new Changeset();
+ },
+
+ /**
+ * Returns the local sync ID for this engine, or `""` if the engine hasn't
+ * synced for the first time. This is exposed for tests.
+ *
+ * @return the current sync ID.
+ */
+ async getSyncID() {
+ return this._syncID;
+ },
+
+ /**
+ * Ensures that the local sync ID for the engine matches the sync ID for the
+ * collection on the server. A mismatch indicates that another client wiped
+ * the collection; we're syncing after a node reassignment, and another
+ * client synced before us; or the store was replaced since the last sync.
+ * In case of a mismatch, we need to reset all local Sync state and start
+ * over as a first sync.
+ *
+ * In most cases, this method should return the new sync ID as-is. However, an
+ * engine may ignore the given ID and assign a different one, if it determines
+ * that the sync ID on the server is out of date. The bookmarks engine uses
+ * this to wipe the server and other clients on the first sync after the user
+ * restores from a backup.
+ *
+ * @param newSyncID
+ * The new sync ID for the collection from `meta/global`.
+ * @return The assigned sync ID. If this doesn't match `newSyncID`, we'll
+ * replace the sync ID in `meta/global` with the assigned ID.
+ */
+ async ensureCurrentSyncID(newSyncID) {
+ let existingSyncID = this._syncID;
+ if (existingSyncID == newSyncID) {
+ return existingSyncID;
+ }
+ this._log.debug(
+ `Engine syncIDs differ (old="${existingSyncID}", new="${newSyncID}") - resetting the engine`
+ );
+ await this.resetClient();
+ Svc.PrefBranch.setStringPref(this.name + ".syncID", newSyncID);
+ Svc.PrefBranch.setStringPref(this.name + ".lastSync", "0");
+ return newSyncID;
+ },
+
+ /**
+ * Resets the local sync ID for the engine, wipes the server, and resets all
+ * local Sync state to start over as a first sync.
+ *
+ * @return the new sync ID.
+ */
+ async resetSyncID() {
+ let newSyncID = await this.resetLocalSyncID();
+ await this.wipeServer();
+ return newSyncID;
+ },
+
+ /**
+ * Resets the local sync ID for the engine, signaling that we're starting over
+ * as a first sync.
+ *
+ * @return the new sync ID.
+ */
+ async resetLocalSyncID() {
+ return this.ensureCurrentSyncID(Utils.makeGUID());
+ },
+
+ /**
+   * Allows overriding scheduler logic -- added to help avoid hammering the
+   * kinto server, because our scheduler was never tuned for it.
+ *
+ * Note: Overriding engines must take resyncs into account -- score will not
+ * be cleared.
+ */
+ shouldSkipSync(syncReason) {
+ return false;
+ },
+
+ /*
+ * lastSync is a timestamp in server time.
+ */
+ async getLastSync() {
+ return this._lastSync;
+ },
+ async setLastSync(lastSync) {
+ // Store the value as a string to keep floating point precision
+ Svc.PrefBranch.setStringPref(this.name + ".lastSync", lastSync.toString());
+ },
+ async resetLastSync() {
+ this._log.debug("Resetting " + this.name + " last sync time");
+ await this.setLastSync(0);
+ },
+
+ get hasSyncedThisSession() {
+ return this._hasSyncedThisSession;
+ },
+
+ set hasSyncedThisSession(hasSynced) {
+ this._hasSyncedThisSession = hasSynced;
+ },
+
+ get toFetch() {
+ this._toFetchStorage.ensureDataReady();
+ return this._toFetchStorage.data.ids;
+ },
+
+ set toFetch(ids) {
+ if (ids.constructor.name != "SerializableSet") {
+ throw new Error(
+ "Bug: Attempted to set toFetch to something that isn't a SerializableSet"
+ );
+ }
+ this._toFetchStorage.data = { ids };
+ this._toFetchStorage.saveSoon();
+ },
+
+ get previousFailed() {
+ this._previousFailedStorage.ensureDataReady();
+ return this._previousFailedStorage.data.ids;
+ },
+
+ set previousFailed(ids) {
+ if (ids.constructor.name != "SerializableSet") {
+ throw new Error(
+ "Bug: Attempted to set previousFailed to something that isn't a SerializableSet"
+ );
+ }
+ this._previousFailedStorage.data = { ids };
+ this._previousFailedStorage.saveSoon();
+ },
+
+ /*
+ * Returns a changeset for this sync. Engine implementations can override this
+ * method to bypass the tracker for certain or all changed items.
+ */
+ async getChangedIDs() {
+ return this._tracker.getChangedIDs();
+ },
+
+ // Create a new record using the store and add in metadata.
+ async _createRecord(id) {
+ let record = await this._store.createRecord(id, this.name);
+ record.id = id;
+ record.collection = this.name;
+ return record;
+ },
+
+ // Creates a tombstone Sync record with additional metadata.
+ _createTombstone(id) {
+ let tombstone = new this._recordObj(this.name, id);
+ tombstone.id = id;
+ tombstone.collection = this.name;
+ tombstone.deleted = true;
+ return tombstone;
+ },
+
+ // Any setup that needs to happen at the beginning of each sync.
+ async _syncStartup() {
+ // Determine if we need to wipe on outdated versions
+ let metaGlobal = await this.service.recordManager.get(this.metaURL);
+ let engines = metaGlobal.payload.engines || {};
+ let engineData = engines[this.name] || {};
+
+ // Assume missing versions are 0 and wipe the server
+ if ((engineData.version || 0) < this.version) {
+ this._log.debug("Old engine data: " + [engineData.version, this.version]);
+
+ // Clear the server and reupload everything on bad version or missing
+ // meta. Note that we don't regenerate per-collection keys here.
+ let newSyncID = await this.resetSyncID();
+
+ // Set the newer version and newly generated syncID
+ engineData.version = this.version;
+ engineData.syncID = newSyncID;
+
+ // Put the new data back into meta/global and mark for upload
+ engines[this.name] = engineData;
+ metaGlobal.payload.engines = engines;
+ metaGlobal.changed = true;
+ } else if (engineData.version > this.version) {
+ // Don't sync this engine if the server has newer data
+
+ let error = new Error("New data: " + [engineData.version, this.version]);
+ error.failureCode = VERSION_OUT_OF_DATE;
+ throw error;
+ } else {
+ // Changes to syncID mean we'll need to upload everything
+ let assignedSyncID = await this.ensureCurrentSyncID(engineData.syncID);
+ if (assignedSyncID != engineData.syncID) {
+ engineData.syncID = assignedSyncID;
+ metaGlobal.changed = true;
+ }
+ }
+
+ // Save objects that need to be uploaded in this._modified. As we
+ // successfully upload objects we remove them from this._modified. If an
+ // error occurs or any objects fail to upload, they will remain in
+ // this._modified. At the end of a sync, or after an error, we add all
+ // objects remaining in this._modified to the tracker.
+ let initialChanges = await this.pullChanges();
+ this._modified.replace(initialChanges);
+ // Clear the tracker now. If the sync fails we'll add the ones we failed
+ // to upload back.
+ this._tracker.clearChangedIDs();
+ this._tracker.resetScore();
+
+ // Keep track of what to delete at the end of sync
+ this._delete = {};
+ },
+
+ async pullChanges() {
+ let lastSync = await this.getLastSync();
+ if (lastSync) {
+ return this.pullNewChanges();
+ }
+ this._log.debug("First sync, uploading all items");
+ return this.pullAllChanges();
+ },
+
+ /**
+ * A tiny abstraction to make it easier to test incoming record
+ * application.
+ */
+ itemSource() {
+ return new Collection(this.engineURL, this._recordObj, this.service);
+ },
+
+ /**
+ * Download and apply remote records changed since the last sync. This
+ * happens in three stages.
+ *
+ * In the first stage, we fetch full records for all changed items, newest
+ * first, up to the download limit. The limit lets us make progress for large
+ * collections, where the sync is likely to be interrupted before we
+ * can fetch everything.
+ *
+ * In the second stage, we fetch the IDs of any remaining records changed
+ * since the last sync, add them to our backlog, and fast-forward our last
+ * sync time.
+ *
+ * In the third stage, we fetch and apply records for all backlogged IDs,
+ * as well as any records that failed to apply during the last sync. We
+ * request records for the IDs in chunks, to avoid exceeding URL length
+ * limits, then remove successfully applied records from the backlog, and
+ * record IDs of any records that failed to apply to retry on the next sync.
+ */
+ async _processIncoming() {
+ this._log.trace("Downloading & applying server changes");
+
+ let newitems = this.itemSource();
+ let lastSync = await this.getLastSync();
+
+ newitems.newer = lastSync;
+ newitems.full = true;
+
+ let downloadLimit = Infinity;
+ if (this.downloadLimit) {
+ // Fetch new records up to the download limit. Currently, only the history
+ // engine sets a limit, since the history collection has the highest volume
+ // of changed records between syncs. The other engines fetch all records
+ // changed since the last sync.
+ if (this._defaultSort) {
+ // A download limit with a sort order doesn't make sense: we won't know
+ // which records to backfill.
+ throw new Error("Can't specify download limit with default sort order");
+ }
+ newitems.sort = "newest";
+ downloadLimit = newitems.limit = this.downloadLimit;
+ } else if (this._defaultSort) {
+ // The bookmarks engine fetches records by sort index; other engines leave
+ // the order unspecified. We can remove `_defaultSort` entirely after bug
+ // 1305563: the sort index won't matter because we'll buffer all bookmarks
+ // before applying.
+ newitems.sort = this._defaultSort;
+ }
+
+ // applied => number of items that should be applied.
+ // failed => number of items that failed in this sync.
+ // newFailed => number of items that failed for the first time in this sync.
+ // reconciled => number of items that were reconciled.
+ // failedReasons => {name, count} of reasons a record failed
+ let countTelemetry = new SyncedRecordsTelemetry();
+ let count = countTelemetry.incomingCounts;
+ let recordsToApply = [];
+ let failedInCurrentSync = new SerializableSet();
+
+ let oldestModified = this.lastModified;
+ let downloadedIDs = new Set();
+
+ // Stage 1: Fetch new records from the server, up to the download limit.
+ if (this.lastModified == null || this.lastModified > lastSync) {
+ let { response, records } = await newitems.getBatched(
+ this.downloadBatchSize
+ );
+ if (!response.success) {
+ response.failureCode = ENGINE_DOWNLOAD_FAIL;
+ throw response;
+ }
+
+ await Async.yieldingForEach(records, async record => {
+ downloadedIDs.add(record.id);
+
+ if (record.modified < oldestModified) {
+ oldestModified = record.modified;
+ }
+
+ let { shouldApply, error } = await this._maybeReconcile(record);
+ if (error) {
+ failedInCurrentSync.add(record.id);
+ count.failed++;
+ countTelemetry.addIncomingFailedReason(error.message);
+ return;
+ }
+ if (!shouldApply) {
+ count.reconciled++;
+ return;
+ }
+ recordsToApply.push(record);
+ });
+
+ let failedToApply = await this._applyRecords(
+ recordsToApply,
+ countTelemetry
+ );
+ Utils.setAddAll(failedInCurrentSync, failedToApply);
+
+ // `applied` is a bit of a misnomer: it counts records that *should* be
+ // applied, so it also includes records that we tried to apply and failed.
+ // `recordsToApply.length - failedToApply.length` is the number of records
+ // that we *successfully* applied.
+ count.failed += failedToApply.length;
+ count.applied += recordsToApply.length;
+ }
+
+ // Stage 2: If we reached our download limit, we might still have records
+ // on the server that changed since the last sync. Fetch the IDs for the
+ // remaining records, and add them to the backlog. Note that this stage
+ // only runs for engines that set a download limit.
+ if (downloadedIDs.size == downloadLimit) {
+ let guidColl = this.itemSource();
+
+ guidColl.newer = lastSync;
+ guidColl.older = oldestModified;
+ guidColl.sort = "oldest";
+
+ let guids = await guidColl.get();
+ if (!guids.success) {
+ throw guids;
+ }
+
+      // Filtering out already downloaded IDs here shouldn't be necessary; we
+      // only do it in case the Sync server doesn't support `older` (bug 1316110).
+ let remainingIDs = guids.obj.filter(id => !downloadedIDs.has(id));
+ if (remainingIDs.length) {
+ this.toFetch = Utils.setAddAll(this.toFetch, remainingIDs);
+ }
+ }
+
+    // Fast-forward the lastSync timestamp since we have backlogged the
+ // remaining items.
+ if (lastSync < this.lastModified) {
+ lastSync = this.lastModified;
+ await this.setLastSync(lastSync);
+ }
+
+ // Stage 3: Backfill records from the backlog, and those that failed to
+ // decrypt or apply during the last sync. We only backfill up to the
+ // download limit, to prevent a large backlog for one engine from blocking
+ // the others. We'll keep processing the backlog on subsequent engine syncs.
+ let failedInPreviousSync = this.previousFailed;
+ let idsToBackfill = Array.from(
+ Utils.setAddAll(
+ Utils.subsetOfSize(this.toFetch, downloadLimit),
+ failedInPreviousSync
+ )
+ );
+
+ // Note that we intentionally overwrite the previously failed list here.
+ // Records that fail to decrypt or apply in two consecutive syncs are likely
+ // corrupt; we remove them from the list because retrying and failing on
+ // every subsequent sync just adds noise.
+ this.previousFailed = failedInCurrentSync;
+
+ let backfilledItems = this.itemSource();
+
+ backfilledItems.sort = "newest";
+ backfilledItems.full = true;
+
+ // `getBatched` includes the list of IDs as a query parameter, so we need to fetch
+ // records in chunks to avoid exceeding URI length limits.
+ if (this.guidFetchBatchSize) {
+ for (let ids of lazy.PlacesUtils.chunkArray(
+ idsToBackfill,
+ this.guidFetchBatchSize
+ )) {
+ backfilledItems.ids = ids;
+
+ let { response, records } = await backfilledItems.getBatched(
+ this.downloadBatchSize
+ );
+ if (!response.success) {
+ response.failureCode = ENGINE_DOWNLOAD_FAIL;
+ throw response;
+ }
+
+ let backfilledRecordsToApply = [];
+ let failedInBackfill = [];
+
+ await Async.yieldingForEach(records, async record => {
+ let { shouldApply, error } = await this._maybeReconcile(record);
+ if (error) {
+ failedInBackfill.push(record.id);
+ count.failed++;
+ countTelemetry.addIncomingFailedReason(error.message);
+ return;
+ }
+ if (!shouldApply) {
+ count.reconciled++;
+ return;
+ }
+ backfilledRecordsToApply.push(record);
+ });
+
+ let failedToApply = await this._applyRecords(
+ backfilledRecordsToApply,
+ countTelemetry
+ );
+ failedInBackfill.push(...failedToApply);
+
+ count.failed += failedToApply.length;
+ count.applied += backfilledRecordsToApply.length;
+
+ this.toFetch = Utils.setDeleteAll(this.toFetch, ids);
+ this.previousFailed = Utils.setAddAll(
+ this.previousFailed,
+ failedInBackfill
+ );
+
+ if (lastSync < this.lastModified) {
+ lastSync = this.lastModified;
+ await this.setLastSync(lastSync);
+ }
+ }
+ }
+
+ count.newFailed = 0;
+ for (let item of this.previousFailed) {
+      // Anything that failed in both the current sync and the previous sync
+      // likely has something wrong with the record itself, so we stop
+      // retrying it to prevent syncing corrupted records indefinitely.
+ if (failedInPreviousSync.has(item)) {
+ this.previousFailed.delete(item);
+ } else {
+        // Otherwise it's a new failure, and we count it as such.
+ ++count.newFailed;
+ }
+ }
+
+ count.succeeded = Math.max(0, count.applied - count.failed);
+ this._log.info(
+ [
+ "Records:",
+ count.applied,
+ "applied,",
+ count.succeeded,
+ "successfully,",
+ count.failed,
+ "failed to apply,",
+ count.newFailed,
+ "newly failed to apply,",
+ count.reconciled,
+ "reconciled.",
+ ].join(" ")
+ );
+ Observers.notify("weave:engine:sync:applied", count, this.name);
+ },
+
+ async _maybeReconcile(item) {
+ let key = this.service.collectionKeys.keyForCollection(this.name);
+
+ // Grab a later last modified if possible
+ if (this.lastModified == null || item.modified > this.lastModified) {
+ this.lastModified = item.modified;
+ }
+
+ try {
+ try {
+ await item.decrypt(key);
+ } catch (ex) {
+ if (!Utils.isHMACMismatch(ex)) {
+ throw ex;
+ }
+ let strategy = await this.handleHMACMismatch(item, true);
+ if (strategy == SyncEngine.kRecoveryStrategy.retry) {
+ // You only get one retry.
+ try {
+ // Try decrypting again, typically because we've got new keys.
+ this._log.info("Trying decrypt again...");
+ key = this.service.collectionKeys.keyForCollection(this.name);
+ await item.decrypt(key);
+ strategy = null;
+ } catch (ex) {
+ if (!Utils.isHMACMismatch(ex)) {
+ throw ex;
+ }
+ strategy = await this.handleHMACMismatch(item, false);
+ }
+ }
+
+ switch (strategy) {
+ case null:
+ // Retry succeeded! No further handling.
+ break;
+ case SyncEngine.kRecoveryStrategy.retry:
+ this._log.debug("Ignoring second retry suggestion.");
+ // Fall through to error case.
+ case SyncEngine.kRecoveryStrategy.error:
+ this._log.warn("Error decrypting record", ex);
+ return { shouldApply: false, error: ex };
+ case SyncEngine.kRecoveryStrategy.ignore:
+ this._log.debug(
+ "Ignoring record " + item.id + " with bad HMAC: already handled."
+ );
+ return { shouldApply: false, error: null };
+ }
+ }
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.warn("Error decrypting record", ex);
+ return { shouldApply: false, error: ex };
+ }
+
+ if (this._shouldDeleteRemotely(item)) {
+ this._log.trace("Deleting item from server without applying", item);
+ await this._deleteId(item.id);
+ return { shouldApply: false, error: null };
+ }
+
+ let shouldApply;
+ try {
+ shouldApply = await this._reconcile(item);
+ } catch (ex) {
+ if (ex.code == SyncEngine.prototype.eEngineAbortApplyIncoming) {
+ this._log.warn("Reconciliation failed: aborting incoming processing.");
+ throw ex.cause;
+ } else if (!Async.isShutdownException(ex)) {
+ this._log.warn("Failed to reconcile incoming record " + item.id, ex);
+ return { shouldApply: false, error: ex };
+ } else {
+ throw ex;
+ }
+ }
+
+ if (!shouldApply) {
+ this._log.trace("Skipping reconciled incoming item " + item.id);
+ }
+
+ return { shouldApply, error: null };
+ },
+
+ async _applyRecords(records, countTelemetry) {
+ this._tracker.ignoreAll = true;
+ try {
+ let failedIDs = await this._store.applyIncomingBatch(
+ records,
+ countTelemetry
+ );
+ return failedIDs;
+ } catch (ex) {
+ // Catch any error that escapes from applyIncomingBatch. At present
+ // those will all be abort events.
+ this._log.warn("Got exception, aborting processIncoming", ex);
+ throw ex;
+ } finally {
+ this._tracker.ignoreAll = false;
+ }
+ },
+
+ // Indicates whether an incoming item should be deleted from the server at
+ // the end of the sync. Engines can override this method to clean up records
+ // that shouldn't be on the server.
+ _shouldDeleteRemotely(remoteItem) {
+ return false;
+ },
+
+ /**
+   * Find the GUID of an item that is a duplicate of the incoming item but
+   * happens to have a different GUID.
+ *
+ * @return GUID of the similar item; falsy otherwise
+ */
+ async _findDupe(item) {
+    // By default, assume there are no dupe items for the engine.
+ },
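+
+  // An illustrative override (the field and helper are hypothetical): an
+  // engine whose records have a natural key can look it up so the local item
+  // adopts the incoming GUID.
+  //
+  //   async _findDupe(item) {
+  //     let local = await this._store.findItemByName(item.name);
+  //     return local ? local.guid : null;
+  //   }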
+
+ /**
+ * Called before a remote record is discarded due to failed reconciliation.
+ * Used by bookmark sync to merge folder child orders.
+ */
+ beforeRecordDiscard(localRecord, remoteRecord, remoteIsNewer) {},
+
+ // Called when the server has a record marked as deleted, but locally we've
+ // changed it more recently than the deletion. If we return false, the
+ // record will be deleted locally. If we return true, we'll reupload the
+ // record to the server -- any extra work that's needed as part of this
+ // process should be done at this point (such as mark the record's parent
+ // for reuploading in the case of bookmarks).
+ async _shouldReviveRemotelyDeletedRecord(remoteItem) {
+ return true;
+ },
+
+ async _deleteId(id) {
+ await this._tracker.removeChangedID(id);
+ this._noteDeletedId(id);
+ },
+
+ // Marks an ID for deletion at the end of the sync.
+ _noteDeletedId(id) {
+ if (this._delete.ids == null) {
+ this._delete.ids = [id];
+ } else {
+ this._delete.ids.push(id);
+ }
+ },
+
+ async _switchItemToDupe(localDupeGUID, incomingItem) {
+ // The local, duplicate ID is always deleted on the server.
+ await this._deleteId(localDupeGUID);
+
+ // We unconditionally change the item's ID in case the engine knows of
+ // an item but doesn't expose it through itemExists. If the API
+ // contract were stronger, this could be changed.
+ this._log.debug(
+ "Switching local ID to incoming: " +
+ localDupeGUID +
+ " -> " +
+ incomingItem.id
+ );
+ return this._store.changeItemID(localDupeGUID, incomingItem.id);
+ },
+
+ /**
+ * Reconcile incoming record with local state.
+ *
+ * This function essentially determines whether to apply an incoming record.
+ *
+ * @param item
+ * Record from server to be tested for application.
+ * @return boolean
+ * Truthy if incoming record should be applied. False if not.
+ */
+ async _reconcile(item) {
+ if (this._log.level <= Log.Level.Trace) {
+ this._log.trace("Incoming: " + item);
+ }
+
+ // We start reconciling by collecting a bunch of state. We do this here
+ // because some state may change during the course of this function and we
+ // need to operate on the original values.
+ let existsLocally = await this._store.itemExists(item.id);
+ let locallyModified = this._modified.has(item.id);
+
+ // TODO Handle clock drift better. Tracked in bug 721181.
+ let remoteAge = Resource.serverTime - item.modified;
+ let localAge = locallyModified
+ ? Date.now() / 1000 - this._modified.getModifiedTimestamp(item.id)
+ : null;
+ let remoteIsNewer = remoteAge < localAge;
+
+ this._log.trace(
+ "Reconciling " +
+ item.id +
+ ". exists=" +
+ existsLocally +
+ "; modified=" +
+ locallyModified +
+ "; local age=" +
+ localAge +
+ "; incoming age=" +
+ remoteAge
+ );
+
+ // We handle deletions first so subsequent logic doesn't have to check
+ // deleted flags.
+ if (item.deleted) {
+ // If the item doesn't exist locally, there is nothing for us to do. We
+ // can't check for duplicates because the incoming record has no data
+ // which can be used for duplicate detection.
+ if (!existsLocally) {
+ this._log.trace(
+ "Ignoring incoming item because it was deleted and " +
+ "the item does not exist locally."
+ );
+ return false;
+ }
+
+ // We decide whether to process the deletion by comparing the record
+ // ages. If the item is not modified locally, the remote side wins and
+ // the deletion is processed. If it is modified locally, we take the
+ // newer record.
+ if (!locallyModified) {
+ this._log.trace(
+ "Applying incoming delete because the local item " +
+ "exists and isn't modified."
+ );
+ return true;
+ }
+ this._log.trace("Incoming record is deleted but we had local changes.");
+
+ if (remoteIsNewer) {
+ this._log.trace("Remote record is newer -- deleting local record.");
+ return true;
+ }
+ // If the local record is newer, we defer to individual engines for
+ // how to handle this. By default, we revive the record.
+ let willRevive = await this._shouldReviveRemotelyDeletedRecord(item);
+ this._log.trace("Local record is newer -- reviving? " + willRevive);
+
+ return !willRevive;
+ }
+
+ // At this point the incoming record is not for a deletion and must have
+ // data. If the incoming record does not exist locally, we check for a local
+ // duplicate existing under a different ID. The default implementation of
+ // _findDupe() is empty, so engines have to opt in to this functionality.
+ //
+ // If we find a duplicate, we change the local ID to the incoming ID and we
+ // refresh the metadata collected above. See bug 710448 for the history
+ // of this logic.
+ if (!existsLocally) {
+ let localDupeGUID = await this._findDupe(item);
+ if (localDupeGUID) {
+ this._log.trace(
+ "Local item " +
+ localDupeGUID +
+ " is a duplicate for " +
+ "incoming item " +
+ item.id
+ );
+
+ // The current API contract does not mandate that the ID returned by
+ // _findDupe() actually exists. Therefore, we have to perform this
+ // check.
+ existsLocally = await this._store.itemExists(localDupeGUID);
+
+ // If the local item was modified, we carry its metadata forward so
+ // appropriate reconciling can be performed.
+ if (this._modified.has(localDupeGUID)) {
+ locallyModified = true;
+ localAge =
+ this._tracker._now() -
+ this._modified.getModifiedTimestamp(localDupeGUID);
+ remoteIsNewer = remoteAge < localAge;
+
+ this._modified.changeID(localDupeGUID, item.id);
+ } else {
+ locallyModified = false;
+ localAge = null;
+ }
+
+ // Tell the engine to do whatever it needs to switch the items.
+ await this._switchItemToDupe(localDupeGUID, item);
+
+ this._log.debug(
+ "Local item after duplication: age=" +
+ localAge +
+ "; modified=" +
+ locallyModified +
+ "; exists=" +
+ existsLocally
+ );
+ } else {
+ this._log.trace("No duplicate found for incoming item: " + item.id);
+ }
+ }
+
+ // At this point we've performed duplicate detection. But, nothing here
+ // should depend on duplicate detection as the above should have updated
+ // state seamlessly.
+
+ if (!existsLocally) {
+ // If the item doesn't exist locally and we have no local modifications
+ // to the item (implying that it was not deleted), always apply the remote
+ // item.
+ if (!locallyModified) {
+ this._log.trace(
+ "Applying incoming because local item does not exist " +
+ "and was not deleted."
+ );
+ return true;
+ }
+
+      // If the item was modified locally but isn't present, it must have
+      // been deleted. If the incoming record is newer, we restore from
+      // that record.
+ if (remoteIsNewer) {
+ this._log.trace(
+ "Applying incoming because local item was deleted " +
+ "before the incoming item was changed."
+ );
+ this._modified.delete(item.id);
+ return true;
+ }
+
+ this._log.trace(
+ "Ignoring incoming item because the local item's " +
+ "deletion is newer."
+ );
+ return false;
+ }
+
+    // If the remote and local records are the same, there is nothing to be
+    // done, so we don't do anything. Ideally, this logic wouldn't live here
+    // at all: the engine would take a record and apply it, and deferring the
+    // comparison to the engine would avoid a redundant and possibly expensive
+    // dip into the storage layer to query item state. This should get
+    // addressed in the async rewrite, so we ignore it for now.
+ let localRecord = await this._createRecord(item.id);
+ let recordsEqual = Utils.deepEquals(item.cleartext, localRecord.cleartext);
+
+ // If the records are the same, we don't need to do anything. This does
+ // potentially throw away a local modification time. But, if the records
+ // are the same, does it matter?
+ if (recordsEqual) {
+ this._log.trace(
+ "Ignoring incoming item because the local item is identical."
+ );
+
+ this._modified.delete(item.id);
+ return false;
+ }
+
+ // At this point the records are different.
+
+ // If we have no local modifications, always take the server record.
+ if (!locallyModified) {
+ this._log.trace("Applying incoming record because no local conflicts.");
+ return true;
+ }
+
+    // At this point, records are different and the local record is modified.
+    // We resolve conflicts by record age, where the newest one wins. This can
+    // result in data loss, so the engine should eventually be given an
+    // opportunity to merge the records. Bug 720592 tracks this feature.
+ this._log.warn(
+ "DATA LOSS: Both local and remote changes to record: " + item.id
+ );
+ if (!remoteIsNewer) {
+ this.beforeRecordDiscard(localRecord, item, remoteIsNewer);
+ }
+ return remoteIsNewer;
+ },
+
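+  // Worked example (illustrative numbers): with a server time of 1000000,
+  // an incoming record modified at 999940 has a remote age of 60s. If the
+  // local copy changed 30s ago, the local age is 30s, remoteIsNewer is
+  // false, and the local record wins; the remote record is passed to
+  // beforeRecordDiscard() before being dropped.
+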
+ // Upload outgoing records.
+ async _uploadOutgoing() {
+ this._log.trace("Uploading local changes to server.");
+
+ // collection we'll upload
+ let up = new Collection(this.engineURL, null, this.service);
+ let modifiedIDs = new Set(this._modified.ids());
+ let countTelemetry = new SyncedRecordsTelemetry();
+ let counts = countTelemetry.outgoingCounts;
+ this._log.info(`Uploading ${modifiedIDs.size} outgoing records`);
+ if (modifiedIDs.size) {
+ counts.sent = modifiedIDs.size;
+
+ let failed = [];
+ let successful = [];
+ let lastSync = await this.getLastSync();
+ let handleResponse = async (postQueue, resp, batchOngoing) => {
+ // Note: We don't want to update this.lastSync, or this._modified until
+ // the batch is complete, however we want to remember success/failure
+ // indicators for when that happens.
+ if (!resp.success) {
+ this._log.debug(`Uploading records failed: ${resp.status}`);
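+          // A 412 means the collection changed on the server while our
+          // batch was in flight (another client raced us), so the batch
+          // was interrupted rather than failing outright.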
+ resp.failureCode =
+ resp.status == 412 ? ENGINE_BATCH_INTERRUPTED : ENGINE_UPLOAD_FAIL;
+ throw resp;
+ }
+
+ // Update server timestamp from the upload.
+ failed = failed.concat(Object.keys(resp.obj.failed));
+ successful = successful.concat(resp.obj.success);
+
+ if (batchOngoing) {
+ // Nothing to do yet
+ return;
+ }
+
+ if (failed.length && this._log.level <= Log.Level.Debug) {
+ this._log.debug(
+ "Records that will be uploaded again because " +
+ "the server couldn't store them: " +
+ failed.join(", ")
+ );
+ }
+
+        counts.failed += failed.length;
+        // `failed` is a plain array of ids, so iterate it directly rather
+        // than through Object.values().
+        failed.forEach(message => {
+          countTelemetry.addOutgoingFailedReason(message);
+        });
+
+ for (let id of successful) {
+ this._modified.delete(id);
+ }
+
+ await this._onRecordsWritten(
+ successful,
+ failed,
+ postQueue.lastModified
+ );
+
+ // Advance lastSync since we've finished the batch.
+ if (postQueue.lastModified > lastSync) {
+ lastSync = postQueue.lastModified;
+ await this.setLastSync(lastSync);
+ }
+
+ // clear for next batch
+ failed.length = 0;
+ successful.length = 0;
+ };
+
+ let postQueue = up.newPostQueue(this._log, lastSync, handleResponse);
+
+ for (let id of modifiedIDs) {
+ let out;
+ let ok = false;
+ try {
+ out = await this._createRecord(id);
+ if (this._log.level <= Log.Level.Trace) {
+ this._log.trace("Outgoing: " + out);
+ }
+ await out.encrypt(
+ this.service.collectionKeys.keyForCollection(this.name)
+ );
+ ok = true;
+ } catch (ex) {
+ this._log.warn("Error creating record", ex);
+ ++counts.failed;
+ countTelemetry.addOutgoingFailedReason(ex.message);
+ if (Async.isShutdownException(ex) || !this.allowSkippedRecord) {
+ if (!this.allowSkippedRecord) {
+ // Don't bother for shutdown errors
+ Observers.notify("weave:engine:sync:uploaded", counts, this.name);
+ }
+ throw ex;
+ }
+ }
+ if (ok) {
+ let { enqueued, error } = await postQueue.enqueue(out);
+ if (!enqueued) {
+ ++counts.failed;
+ countTelemetry.addOutgoingFailedReason(error.message);
+ if (!this.allowSkippedRecord) {
+ Observers.notify("weave:engine:sync:uploaded", counts, this.name);
+ this._log.warn(
+ `Failed to enqueue record "${id}" (aborting)`,
+ error
+ );
+ throw error;
+ }
+ this._modified.delete(id);
+ this._log.warn(
+ `Failed to enqueue record "${id}" (skipping)`,
+ error
+ );
+ }
+ }
+ await Async.promiseYield();
+ }
+ await postQueue.flush(true);
+ }
+
+ if (counts.sent || counts.failed) {
+ Observers.notify("weave:engine:sync:uploaded", counts, this.name);
+ }
+ },
+
+ async _onRecordsWritten(succeeded, failed, serverModifiedTime) {
+ // Implement this method to take specific actions against successfully
+ // uploaded records and failed records.
+ },
+
+  // Any cleanup necessary: process the deletions queued during the sync and
+  // note that this engine has synced during this session.
+ async _syncFinish() {
+ this._log.trace("Finishing up sync");
+
+ let doDelete = async (key, val) => {
+ let coll = new Collection(this.engineURL, this._recordObj, this.service);
+ coll[key] = val;
+ await coll.delete();
+ };
+
+ for (let [key, val] of Object.entries(this._delete)) {
+ // Remove the key for future uses
+ delete this._delete[key];
+
+ this._log.trace("doing post-sync deletions", { key, val });
+ // Send a simple delete for the property
+ if (key != "ids" || val.length <= 100) {
+ await doDelete(key, val);
+ } else {
+ // For many ids, split into chunks of at most 100
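+        // e.g. 250 queued ids produce three DELETE requests covering
+        // 100, 100, and 50 ids respectively.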
+ while (val.length) {
+ await doDelete(key, val.slice(0, 100));
+ val = val.slice(100);
+ }
+ }
+ }
+ this.hasSyncedThisSession = true;
+ await this._tracker.asyncObserver.promiseObserversComplete();
+ },
+
+ async _syncCleanup() {
+ try {
+ // Mark failed WBOs as changed again so they are reuploaded next time.
+ await this.trackRemainingChanges();
+ } finally {
+ this._modified.clear();
+ }
+ },
+
+ async _sync() {
+ try {
+ Async.checkAppReady();
+ await this._syncStartup();
+ Async.checkAppReady();
+ Observers.notify("weave:engine:sync:status", "process-incoming");
+ await this._processIncoming();
+ Async.checkAppReady();
+ Observers.notify("weave:engine:sync:status", "upload-outgoing");
+ try {
+ await this._uploadOutgoing();
+ Async.checkAppReady();
+ await this._syncFinish();
+ } catch (ex) {
+ if (!ex.status || ex.status != 412) {
+ throw ex;
+ }
+        // A 412 response to a POST just means another client raced us. We
+        // don't want to treat that as a sync error; the next sync is almost
+        // certain to work.
+ this._log.warn("412 error during sync - will retry.");
+ }
+ } finally {
+ await this._syncCleanup();
+ }
+ },
+
+ async canDecrypt() {
+ // Report failure even if there's nothing to decrypt
+ let canDecrypt = false;
+
+ // Fetch the most recently uploaded record and try to decrypt it
+ let test = new Collection(this.engineURL, this._recordObj, this.service);
+ test.limit = 1;
+ test.sort = "newest";
+ test.full = true;
+
+ let key = this.service.collectionKeys.keyForCollection(this.name);
+
+ // Any failure fetching/decrypting will just result in false
+ try {
+ this._log.trace("Trying to decrypt a record from the server..");
+ let json = (await test.get()).obj[0];
+ let record = new this._recordObj();
+ record.deserialize(json);
+ await record.decrypt(key);
+ canDecrypt = true;
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.debug("Failed test decrypt", ex);
+ }
+
+ return canDecrypt;
+ },
+
+ /**
+ * Deletes the collection for this engine on the server, and removes all local
+ * Sync metadata for this engine. This does *not* remove any existing data on
+ * other clients. This is called when we reset the sync ID.
+ */
+ async wipeServer() {
+ await this._deleteServerCollection();
+ await this._resetClient();
+ },
+
+ /**
+ * Deletes the collection for this engine on the server, without removing
+ * any local Sync metadata or user data. Deleting the collection will not
+ * remove any user data on other clients, but will force other clients to
+ * start over as a first sync.
+ */
+ async _deleteServerCollection() {
+ let response = await this.service.resource(this.engineURL).delete();
+ if (response.status != 200 && response.status != 404) {
+ throw response;
+ }
+ },
+
+ async removeClientData() {
+ // Implement this method in engines that store client specific data
+ // on the server.
+ },
+
+  /**
+   * Decide on (and partially effect) an error-handling strategy.
+   *
+   * Asks the Service to respond to an HMAC error, which might result in keys
+   * being downloaded. That call returns true if it took an action that might
+   * allow a retry to occur.
+ *
+ * If `mayRetry` is truthy, and the Service suggests a retry,
+ * handleHMACMismatch returns kRecoveryStrategy.retry. Otherwise, it returns
+ * kRecoveryStrategy.error.
+ *
+ * Subclasses of SyncEngine can override this method to allow for different
+ * behavior -- e.g., to delete and ignore erroneous entries.
+ *
+ * All return values will be part of the kRecoveryStrategy enumeration.
+ */
+ async handleHMACMismatch(item, mayRetry) {
+ // By default we either try again, or bail out noisily.
+ return (await this.service.handleHMACEvent()) && mayRetry
+ ? SyncEngine.kRecoveryStrategy.retry
+ : SyncEngine.kRecoveryStrategy.error;
+ },
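+
+  // A minimal sketch of such an override (hypothetical subclass, not part
+  // of this module), for an engine that deletes and ignores undecryptable
+  // records instead of failing:
+  //
+  //   async handleHMACMismatch(item, mayRetry) {
+  //     if (mayRetry && (await this.service.handleHMACEvent())) {
+  //       return SyncEngine.kRecoveryStrategy.retry;
+  //     }
+  //     await this._deleteId(item.id);
+  //     return SyncEngine.kRecoveryStrategy.ignore;
+  //   }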
+
+  /**
+   * Returns the changes for all items in the store. The default
+   * implementation maps every ID to a timestamp of 0 (i.e. long ago), for
+   * example `{ guidA: 0, guidB: 0 }`, to ensure we always prefer the remote
+   * version if one exists.
+   *
+   * This function is only called for the first sync. Subsequent syncs call
+   * `pullNewChanges`.
+   *
+   * @return An object mapping IDs to change data, suitable for populating a
+   *         `Changeset`.
+   */
+ async pullAllChanges() {
+ let changes = {};
+ let ids = await this._store.getAllIDs();
+ for (let id in ids) {
+ changes[id] = 0;
+ }
+ return changes;
+ },
+
+  /**
+   * Returns the changes for all currently tracked items. The default
+   * implementation returns entries whose timestamps indicate when each item
+   * was added to the tracker.
+   *
+   * @return An object mapping IDs to change data, suitable for populating a
+   *         `Changeset`.
+   */
+ async pullNewChanges() {
+ await this._tracker.asyncObserver.promiseObserversComplete();
+ return this.getChangedIDs();
+ },
+
+ /**
+ * Adds all remaining changeset entries back to the tracker, typically for
+ * items that failed to upload. This method is called at the end of each sync.
+ *
+ */
+ async trackRemainingChanges() {
+ for (let [id, change] of this._modified.entries()) {
+ await this._tracker.addChangedID(id, change);
+ }
+ },
+
+ /**
+ * Removes all local Sync metadata for this engine, but keeps all existing
+ * local user data.
+ */
+ async resetClient() {
+ return this._notify("reset-client", this.name, this._resetClient)();
+ },
+
+ async _resetClient() {
+ await this.resetLastSync();
+ this.hasSyncedThisSession = false;
+ this.previousFailed = new SerializableSet();
+ this.toFetch = new SerializableSet();
+ },
+
+ /**
+ * Removes all local Sync metadata and user data for this engine.
+ */
+ async wipeClient() {
+ return this._notify("wipe-client", this.name, this._wipeClient)();
+ },
+
+ async _wipeClient() {
+ await this.resetClient();
+ this._log.debug("Deleting all local data");
+ this._tracker.ignoreAll = true;
+ await this._store.wipe();
+ this._tracker.ignoreAll = false;
+ this._tracker.clearChangedIDs();
+ },
+
+ /**
+ * If one exists, initialize and return a validator for this engine (which
+ * must have a `validate(engine)` method that returns a promise to an object
+ * with a getSummary method). Otherwise return null.
+ */
+ getValidator() {
+ return null;
+ },
+
+ async finalize() {
+ Services.prefs.removeObserver(
+ `${PREFS_BRANCH}engine.${this.prefName}`,
+ this.asyncObserver
+ );
+ await this.asyncObserver.promiseObserversComplete();
+ await this._tracker.finalize();
+ await this._toFetchStorage.finalize();
+ await this._previousFailedStorage.finalize();
+ },
+
+ // Returns a new watchdog. Exposed for tests.
+ _newWatchdog() {
+ return Async.watchdog();
+ },
+};
+
+/**
+ * A changeset is created for each sync in `Engine::get{Changed, All}IDs`,
+ * and stores opaque change data for tracked IDs. The default implementation
+ * only records timestamps, though engines can extend this to store additional
+ * data for each entry.
+ */
+export class Changeset {
+ // Creates an empty changeset.
+ constructor() {
+ this.changes = {};
+ }
+
+ // Returns the last modified time, in seconds, for an entry in the changeset.
+ // `id` is guaranteed to be in the set.
+ getModifiedTimestamp(id) {
+ return this.changes[id];
+ }
+
+ // Adds a change for a tracked ID to the changeset.
+ set(id, change) {
+ this.changes[id] = change;
+ }
+
+ // Adds multiple entries to the changeset, preserving existing entries.
+ insert(changes) {
+ Object.assign(this.changes, changes);
+ }
+
+ // Overwrites the existing set of tracked changes with new entries.
+ replace(changes) {
+ this.changes = changes;
+ }
+
+ // Indicates whether an entry is in the changeset.
+ has(id) {
+ return id in this.changes;
+ }
+
+ // Deletes an entry from the changeset. Used to clean up entries for
+ // reconciled and successfully uploaded records.
+ delete(id) {
+ delete this.changes[id];
+ }
+
+ // Changes the ID of an entry in the changeset. Used when reconciling
+ // duplicates that have local changes.
+ changeID(oldID, newID) {
+ this.changes[newID] = this.changes[oldID];
+ delete this.changes[oldID];
+ }
+
+ // Returns an array of all tracked IDs in this changeset.
+ ids() {
+ return Object.keys(this.changes);
+ }
+
+ // Returns an array of `[id, change]` tuples. Used to repopulate the tracker
+ // with entries for failed uploads at the end of a sync.
+ entries() {
+ return Object.entries(this.changes);
+ }
+
+ // Returns the number of entries in this changeset.
+ count() {
+ return this.ids().length;
+ }
+
+ // Clears the changeset.
+ clear() {
+ this.changes = {};
+ }
+}
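+
+// A minimal usage sketch for Changeset (illustrative; the GUIDs and
+// timestamps are made up):
+//
+//   let changeset = new Changeset();
+//   changeset.replace({ guidA: 1712000000, guidB: 1712000500 });
+//   changeset.has("guidA"); // true
+//   changeset.changeID("guidA", "guidC"); // e.g. after resolving a dupe
+//   changeset.delete("guidB"); // e.g. after a successful upload
+//   changeset.ids(); // ["guidC"]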
diff --git a/services/sync/modules/engines/addons.sys.mjs b/services/sync/modules/engines/addons.sys.mjs
new file mode 100644
index 0000000000..782d23239e
--- /dev/null
+++ b/services/sync/modules/engines/addons.sys.mjs
@@ -0,0 +1,818 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file defines the add-on sync functionality.
+ *
+ * There are currently a number of known limitations:
+ * - We only sync XPI extensions and themes available from addons.mozilla.org.
+ * We hope to expand support for other add-ons eventually.
+ * - We only attempt syncing of add-ons between applications of the same type.
+ * This means add-ons will not synchronize between Firefox desktop and
+ * Firefox mobile, for example. This is because of significant add-on
+ * incompatibility between application types.
+ *
+ * Add-on records exist for each known {add-on, app-id} pair in the Sync client
+ * set. Each record has a randomly chosen GUID. The records then contain
+ * basic metadata about the add-on.
+ *
+ * We currently synchronize:
+ *
+ * - Installations
+ * - Uninstallations
+ * - User enabling and disabling
+ *
+ * Synchronization is influenced by the following preferences:
+ *
+ * - services.sync.addons.ignoreUserEnabledChanges
+ * - services.sync.addons.trustedSourceHostnames
+ *
+ * and also influenced by whether addons have repository caching enabled and
+ * whether they allow installation of addons from insecure origins (both of
+ * which are themselves influenced by the "extensions." pref branch)
+ *
+ * See the documentation in all.js for the behavior of these prefs.
+ */
+
+import { AddonUtils } from "resource://services-sync/addonutils.sys.mjs";
+import { AddonsReconciler } from "resource://services-sync/addonsreconciler.sys.mjs";
+import {
+ Store,
+ SyncEngine,
+ LegacyTracker,
+} from "resource://services-sync/engines.sys.mjs";
+import { CryptoWrapper } from "resource://services-sync/record.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import { SCORE_INCREMENT_XLARGE } from "resource://services-sync/constants.sys.mjs";
+import { CollectionValidator } from "resource://services-sync/collection_validator.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ AddonManager: "resource://gre/modules/AddonManager.sys.mjs",
+ AddonRepository: "resource://gre/modules/addons/AddonRepository.sys.mjs",
+});
+
+// 7 days in milliseconds.
+const PRUNE_ADDON_CHANGES_THRESHOLD = 60 * 60 * 24 * 7 * 1000;
+
+/**
+ * AddonRecord represents the state of an add-on in an application.
+ *
+ * Each add-on has its own record for each application ID it is installed
+ * on.
+ *
+ * The ID of add-on records is a randomly-generated GUID. It is random instead
+ * of deterministic so the URIs of the records cannot be guessed and so
+ * compromised server credentials won't result in disclosure of the specific
+ * add-ons present in a Sync account.
+ *
+ * The record contains the following fields:
+ *
+ * addonID
+ * ID of the add-on. This correlates to the "id" property on an Addon type.
+ *
+ * applicationID
+ * The application ID this record is associated with.
+ *
+ * enabled
+ * Boolean stating whether add-on is enabled or disabled by the user.
+ *
+ * source
+ * String indicating where an add-on is from. Currently, we only support
+ * the value "amo" which indicates that the add-on came from the official
+ * add-ons repository, addons.mozilla.org. In the future, we may support
+ * installing add-ons from other sources. This provides a future-compatible
+ * mechanism for clients to only apply records they know how to handle.
+ */
+function AddonRecord(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+AddonRecord.prototype = {
+ _logName: "Record.Addon",
+};
+Object.setPrototypeOf(AddonRecord.prototype, CryptoWrapper.prototype);
+
+Utils.deferGetSet(AddonRecord, "cleartext", [
+ "addonID",
+ "applicationID",
+ "enabled",
+ "source",
+]);
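+
+// For illustration only, the cleartext payload of one of these records
+// might look like (hypothetical values):
+//
+//   {
+//     "addonID": "some-extension@example.com",
+//     "applicationID": "{ec8030f7-c20a-464f-9b0e-13a3a9e97384}",
+//     "enabled": true,
+//     "source": "amo"
+//   }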
+
+/**
+ * The AddonsEngine handles synchronization of add-ons between clients.
+ *
+ * The engine maintains an instance of an AddonsReconciler, which is the entity
+ * maintaining state for add-ons. It provides the history and tracking APIs
+ * that AddonManager doesn't.
+ *
+ * The engine instance overrides a handful of functions on the base class. The
+ * rationale for each is documented by that function.
+ */
+export function AddonsEngine(service) {
+ SyncEngine.call(this, "Addons", service);
+
+ this._reconciler = new AddonsReconciler(this._tracker.asyncObserver);
+}
+
+AddonsEngine.prototype = {
+ _storeObj: AddonsStore,
+ _trackerObj: AddonsTracker,
+ _recordObj: AddonRecord,
+ version: 1,
+
+ syncPriority: 5,
+
+ _reconciler: null,
+
+ async initialize() {
+ await SyncEngine.prototype.initialize.call(this);
+ await this._reconciler.ensureStateLoaded();
+ },
+
+ /**
+ * Override parent method to find add-ons by their public ID, not Sync GUID.
+ */
+ async _findDupe(item) {
+ let id = item.addonID;
+
+ // The reconciler should have been updated at the top of the sync, so we
+ // can assume it is up to date when this function is called.
+ let addons = this._reconciler.addons;
+ if (!(id in addons)) {
+ return null;
+ }
+
+ let addon = addons[id];
+ if (addon.guid != item.id) {
+ return addon.guid;
+ }
+
+ return null;
+ },
+
+ /**
+ * Override getChangedIDs to pull in tracker changes plus changes from the
+ * reconciler log.
+ */
+ async getChangedIDs() {
+ let changes = {};
+ const changedIDs = await this._tracker.getChangedIDs();
+ for (let [id, modified] of Object.entries(changedIDs)) {
+ changes[id] = modified;
+ }
+
+ let lastSync = await this.getLastSync();
+ let lastSyncDate = new Date(lastSync * 1000);
+
+ // The reconciler should have been refreshed at the beginning of a sync and
+ // we assume this function is only called from within a sync.
+ let reconcilerChanges = this._reconciler.getChangesSinceDate(lastSyncDate);
+ let addons = this._reconciler.addons;
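+    // Each entry in the reconciler's change log is a [date, changeType, id]
+    // tuple; we only need the date and the add-on ID here.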
+ for (let change of reconcilerChanges) {
+ let changeTime = change[0];
+ let id = change[2];
+
+ if (!(id in addons)) {
+ continue;
+ }
+
+      // Keep the newest modified time. (`changeTime` is a Date; compare in
+      // seconds to match the tracker's timestamps.)
+      if (id in changes && changeTime.getTime() / 1000 < changes[id]) {
+        continue;
+      }
+
+ if (!(await this.isAddonSyncable(addons[id]))) {
+ continue;
+ }
+
+ this._log.debug("Adding changed add-on from changes log: " + id);
+ let addon = addons[id];
+ changes[addon.guid] = changeTime.getTime() / 1000;
+ }
+
+ return changes;
+ },
+
+ /**
+ * Override start of sync function to refresh reconciler.
+ *
+ * Many functions in this class assume the reconciler is refreshed at the
+ * top of a sync. If this ever changes, those functions should be revisited.
+ *
+ * Technically speaking, we don't need to refresh the reconciler on every
+ * sync since it is installed as an AddonManager listener. However, add-ons
+ * are complicated and we force a full refresh, just in case the listeners
+ * missed something.
+ */
+ async _syncStartup() {
+ // We refresh state before calling parent because syncStartup in the parent
+ // looks for changed IDs, which is dependent on add-on state being up to
+ // date.
+ await this._refreshReconcilerState();
+ return SyncEngine.prototype._syncStartup.call(this);
+ },
+
+ /**
+ * Override end of sync to perform a little housekeeping on the reconciler.
+ *
+ * We prune changes to prevent the reconciler state from growing without
+ * bound. Even if it grows unbounded, there would have to be many add-on
+ * changes (thousands) for it to slow things down significantly. This is
+ * highly unlikely to occur. Still, we exercise defense just in case.
+ */
+ async _syncCleanup() {
+ let lastSync = await this.getLastSync();
+ let ms = 1000 * lastSync - PRUNE_ADDON_CHANGES_THRESHOLD;
+ this._reconciler.pruneChangesBeforeDate(new Date(ms));
+ return SyncEngine.prototype._syncCleanup.call(this);
+ },
+
+ /**
+ * Helper function to ensure reconciler is up to date.
+ *
+ * This will load the reconciler's state from the file
+ * system (if needed) and refresh the state of the reconciler.
+ */
+ async _refreshReconcilerState() {
+ this._log.debug("Refreshing reconciler state");
+ return this._reconciler.refreshGlobalState();
+ },
+
+  // Returns a promise that resolves with a boolean.
+ isAddonSyncable(addon, ignoreRepoCheck) {
+ return this._store.isAddonSyncable(addon, ignoreRepoCheck);
+ },
+};
+Object.setPrototypeOf(AddonsEngine.prototype, SyncEngine.prototype);
+
+/**
+ * This is the primary interface between Sync and the Addons Manager.
+ *
+ * In addition to the core store APIs, we provide convenience functions to wrap
+ * Add-on Manager APIs with Sync-specific semantics.
+ */
+function AddonsStore(name, engine) {
+ Store.call(this, name, engine);
+}
+AddonsStore.prototype = {
+ // Define the add-on types (.type) that we support.
+ _syncableTypes: ["extension", "theme"],
+
+ _extensionsPrefs: Services.prefs.getBranch("extensions."),
+
+ get reconciler() {
+ return this.engine._reconciler;
+ },
+
+ /**
+ * Override applyIncoming to filter out records we can't handle.
+ */
+ async applyIncoming(record) {
+ // The fields we look at aren't present when the record is deleted.
+ if (!record.deleted) {
+ // Ignore records not belonging to our application ID because that is the
+ // current policy.
+ if (record.applicationID != Services.appinfo.ID) {
+ this._log.info(
+ "Ignoring incoming record from other App ID: " + record.id
+ );
+ return;
+ }
+
+ // Ignore records that aren't from the official add-on repository, as that
+ // is our current policy.
+ if (record.source != "amo") {
+ this._log.info(
+ "Ignoring unknown add-on source (" +
+ record.source +
+ ")" +
+ " for " +
+ record.id
+ );
+ return;
+ }
+ }
+
+ // Ignore incoming records for which an existing non-syncable addon
+ // exists. Note that we do not insist that the addon manager already have
+ // metadata for this addon - it's possible our reconciler previously saw the
+ // addon but the addon-manager cache no longer has it - which is fine for a
+ // new incoming addon.
+    // (Note that most other cases where the addon-manager cache is invalid
+    // don't get this treatment, because that cache self-repairs after some
+    // time - but it only re-populates addons which are currently installed.)
+ let existingMeta = this.reconciler.addons[record.addonID];
+ if (
+ existingMeta &&
+ !(await this.isAddonSyncable(existingMeta, /* ignoreRepoCheck */ true))
+ ) {
+ this._log.info(
+ "Ignoring incoming record for an existing but non-syncable addon",
+ record.addonID
+ );
+ return;
+ }
+
+ await Store.prototype.applyIncoming.call(this, record);
+ },
+
+ /**
+ * Provides core Store API to create/install an add-on from a record.
+ */
+ async create(record) {
+    // This will throw if there was an error. The sync engine will catch the
+    // exception and try to apply the record again later.
+ const results = await AddonUtils.installAddons([
+ {
+ id: record.addonID,
+ syncGUID: record.id,
+ enabled: record.enabled,
+ requireSecureURI: this._extensionsPrefs.getBoolPref(
+ "install.requireSecureOrigin",
+ true
+ ),
+ },
+ ]);
+
+ if (results.skipped.includes(record.addonID)) {
+ this._log.info("Add-on skipped: " + record.addonID);
+      // Just early-return for skipped addons - we don't want to arrange to
+      // try again next time because the condition that caused us to skip
+      // will remain true for this addon forever.
+ return;
+ }
+
+ let addon;
+ for (let a of results.addons) {
+ if (a.id == record.addonID) {
+ addon = a;
+ break;
+ }
+ }
+
+ // This should never happen, but is present as a fail-safe.
+ if (!addon) {
+ throw new Error("Add-on not found after install: " + record.addonID);
+ }
+
+ this._log.info("Add-on installed: " + record.addonID);
+ },
+
+ /**
+ * Provides core Store API to remove/uninstall an add-on from a record.
+ */
+ async remove(record) {
+ // If this is called, the payload is empty, so we have to find by GUID.
+ let addon = await this.getAddonByGUID(record.id);
+ if (!addon) {
+ // We don't throw because if the add-on could not be found then we assume
+ // it has already been uninstalled and there is nothing for this function
+ // to do.
+ return;
+ }
+
+ this._log.info("Uninstalling add-on: " + addon.id);
+ await AddonUtils.uninstallAddon(addon);
+ },
+
+ /**
+ * Provides core Store API to update an add-on from a record.
+ */
+ async update(record) {
+ let addon = await this.getAddonByID(record.addonID);
+
+ // update() is called if !this.itemExists. And, since itemExists consults
+ // the reconciler only, we need to take care of some corner cases.
+ //
+ // First, the reconciler could know about an add-on that was uninstalled
+ // and no longer present in the add-ons manager.
+ if (!addon) {
+ await this.create(record);
+ return;
+ }
+
+ // It's also possible that the add-on is non-restartless and has pending
+ // install/uninstall activity.
+ //
+ // We wouldn't get here if the incoming record was for a deletion. So,
+ // check for pending uninstall and cancel if necessary.
+ if (addon.pendingOperations & lazy.AddonManager.PENDING_UNINSTALL) {
+ addon.cancelUninstall();
+
+      // We continue with processing because there could be a state or ID
+      // change.
+ }
+
+ await this.updateUserDisabled(addon, !record.enabled);
+ },
+
+ /**
+ * Provide core Store API to determine if a record exists.
+ */
+ async itemExists(guid) {
+ let addon = this.reconciler.getAddonStateFromSyncGUID(guid);
+
+ return !!addon;
+ },
+
+ /**
+ * Create an add-on record from its GUID.
+ *
+ * @param guid
+ * Add-on GUID (from extensions DB)
+ * @param collection
+ * Collection to add record to.
+ *
+ * @return AddonRecord instance
+ */
+ async createRecord(guid, collection) {
+ let record = new AddonRecord(collection, guid);
+ record.applicationID = Services.appinfo.ID;
+
+ let addon = this.reconciler.getAddonStateFromSyncGUID(guid);
+
+ // If we don't know about this GUID or if it has been uninstalled, we mark
+ // the record as deleted.
+ if (!addon || !addon.installed) {
+ record.deleted = true;
+ return record;
+ }
+
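+    // Sync record timestamps are decimal seconds; `addon.modified` is a JS
+    // Date, so convert from milliseconds.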
+ record.modified = addon.modified.getTime() / 1000;
+
+ record.addonID = addon.id;
+ record.enabled = addon.enabled;
+
+ // This needs to be dynamic when add-ons don't come from AddonRepository.
+ record.source = "amo";
+
+ return record;
+ },
+
+ /**
+ * Changes the id of an add-on.
+ *
+ * This implements a core API of the store.
+ */
+ async changeItemID(oldID, newID) {
+ // We always update the GUID in the reconciler because it will be
+ // referenced later in the sync process.
+ let state = this.reconciler.getAddonStateFromSyncGUID(oldID);
+ if (state) {
+ state.guid = newID;
+ await this.reconciler.saveState();
+ }
+
+ let addon = await this.getAddonByGUID(oldID);
+ if (!addon) {
+ this._log.debug(
+ "Cannot change item ID (" +
+ oldID +
+ ") in Add-on " +
+ "Manager because old add-on not present: " +
+ oldID
+ );
+ return;
+ }
+
+ addon.syncGUID = newID;
+ },
+
+ /**
+ * Obtain the set of all syncable add-on Sync GUIDs.
+ *
+ * This implements a core Store API.
+ */
+ async getAllIDs() {
+ let ids = {};
+
+ let addons = this.reconciler.addons;
+ for (let id in addons) {
+ let addon = addons[id];
+ if (await this.isAddonSyncable(addon)) {
+ ids[addon.guid] = true;
+ }
+ }
+
+ return ids;
+ },
+
+ /**
+ * Wipe engine data.
+ *
+ * This uninstalls all syncable addons from the application. In case of
+ * error, it logs the error and keeps trying with other add-ons.
+ */
+ async wipe() {
+ this._log.info("Processing wipe.");
+
+ await this.engine._refreshReconcilerState();
+
+ // We only wipe syncable add-ons. Wipe is a Sync feature not a security
+ // feature.
+ let ids = await this.getAllIDs();
+ for (let guid in ids) {
+ let addon = await this.getAddonByGUID(guid);
+ if (!addon) {
+ this._log.debug(
+ "Ignoring add-on because it couldn't be obtained: " + guid
+ );
+ continue;
+ }
+
+ this._log.info("Uninstalling add-on as part of wipe: " + addon.id);
+ await Utils.catch.call(this, () => addon.uninstall())();
+ }
+ },
+
+ /** *************************************************************************
+ * Functions below are unique to this store and not part of the Store API *
+ ***************************************************************************/
+
+ /**
+ * Obtain an add-on from its public ID.
+ *
+ * @param id
+ * Add-on ID
+ * @return Addon or undefined if not found
+ */
+ async getAddonByID(id) {
+ return lazy.AddonManager.getAddonByID(id);
+ },
+
+ /**
+ * Obtain an add-on from its Sync GUID.
+ *
+ * @param guid
+ * Add-on Sync GUID
+ * @return DBAddonInternal or null
+ */
+ async getAddonByGUID(guid) {
+ return lazy.AddonManager.getAddonBySyncGUID(guid);
+ },
+
+ /**
+ * Determines whether an add-on is suitable for Sync.
+ *
+ * @param addon
+ * Addon instance
+ * @param ignoreRepoCheck
+   *        Should we skip checking the Addons repository (primarily useful
+ * for testing and validation).
+ * @return Boolean indicating whether it is appropriate for Sync
+ */
+ async isAddonSyncable(addon, ignoreRepoCheck = false) {
+    // Currently, we limit syncable add-ons to those that are:
+    // 1) In a well-defined set of types.
+    // 2) Installed in the current profile.
+    // 3) Not installed by a foreign entity (i.e. they were installed by the
+    //    app), since foreign installs act like global extensions.
+    // 4) Not a hotfix.
+    // 5) Not vetoed by the add-ons XPIProvider (e.g. for not being installed
+    //    in the profile directory, or any other reason it says the add-on
+    //    can't be synced).
+    // 6) Installed from AMO.
+
+ // We could represent the test as a complex boolean expression. We go the
+ // verbose route so the failure reason is logged.
+ if (!addon) {
+ this._log.debug("Null object passed to isAddonSyncable.");
+ return false;
+ }
+
+ if (!this._syncableTypes.includes(addon.type)) {
+ this._log.debug(
+ addon.id + " not syncable: type not in allowed list: " + addon.type
+ );
+ return false;
+ }
+
+ if (!(addon.scope & lazy.AddonManager.SCOPE_PROFILE)) {
+ this._log.debug(addon.id + " not syncable: not installed in profile.");
+ return false;
+ }
+
+ // If the addon manager says it's not syncable, we skip it.
+ if (!addon.isSyncable) {
+ this._log.debug(addon.id + " not syncable: vetoed by the addon manager.");
+ return false;
+ }
+
+ // This may be too aggressive. If an add-on is downloaded from AMO and
+ // manually placed in the profile directory, foreignInstall will be set.
+ // Arguably, that add-on should be syncable.
+ // TODO Address the edge case and come up with more robust heuristics.
+ if (addon.foreignInstall) {
+ this._log.debug(addon.id + " not syncable: is foreign install.");
+ return false;
+ }
+
+ // If the AddonRepository's cache isn't enabled (which it typically isn't
+ // in tests), getCachedAddonByID always returns null - so skip the check
+ // in that case. We also provide a way to specifically opt-out of the check
+ // even if the cache is enabled, which is used by the validators.
+ if (ignoreRepoCheck || !lazy.AddonRepository.cacheEnabled) {
+ return true;
+ }
+
+ let result = await new Promise(res => {
+ lazy.AddonRepository.getCachedAddonByID(addon.id, res);
+ });
+
+ if (!result) {
+ this._log.debug(
+ addon.id + " not syncable: add-on not found in add-on repository."
+ );
+ return false;
+ }
+
+ return this.isSourceURITrusted(result.sourceURI);
+ },
+
+ /**
+ * Determine whether an add-on's sourceURI field is trusted and the add-on
+ * can be installed.
+ *
+ * This function should only ever be called from isAddonSyncable(). It is
+ * exposed as a separate function to make testing easier.
+ *
+ * @param uri
+ * nsIURI instance to validate
+ * @return bool
+ */
+ isSourceURITrusted: function isSourceURITrusted(uri) {
+ // For security reasons, we currently limit synced add-ons to those
+ // installed from trusted hostname(s). We additionally require TLS with
+ // the add-ons site to help prevent forgeries.
+ let trustedHostnames = Svc.PrefBranch.getStringPref(
+ "addons.trustedSourceHostnames",
+ ""
+ ).split(",");
+
+ if (!uri) {
+ this._log.debug("Undefined argument to isSourceURITrusted().");
+ return false;
+ }
+
+ // Scheme is validated before the hostname because uri.host may not be
+ // populated for certain schemes. It appears to always be populated for
+ // https, so we avoid the potential NS_ERROR_FAILURE on field access.
+ if (uri.scheme != "https") {
+ this._log.debug("Source URI not HTTPS: " + uri.spec);
+ return false;
+ }
+
+ if (!trustedHostnames.includes(uri.host)) {
+ this._log.debug("Source hostname not trusted: " + uri.host);
+ return false;
+ }
+
+ return true;
+ },
+
+ /**
+ * Update the userDisabled flag on an add-on.
+ *
+ * This will enable or disable an add-on. It has no return value and does
+ * not catch or handle exceptions thrown by the addon manager. If no action
+ * is needed it will return immediately.
+ *
+ * @param addon
+ * Addon instance to manipulate.
+ * @param value
+ * Boolean to which to set userDisabled on the passed Addon.
+ */
+ async updateUserDisabled(addon, value) {
+ if (addon.userDisabled == value) {
+ return;
+ }
+
+ // A pref allows changes to the enabled flag to be ignored.
+ if (Svc.PrefBranch.getBoolPref("addons.ignoreUserEnabledChanges", false)) {
+ this._log.info(
+ "Ignoring enabled state change due to preference: " + addon.id
+ );
+ return;
+ }
+
+ AddonUtils.updateUserDisabled(addon, value);
+ // updating this flag doesn't send a notification for appDisabled addons,
+ // meaning the reconciler will not update its state and may resync the
+ // addon - so explicitly rectify the state (bug 1366994)
+ if (addon.appDisabled) {
+ await this.reconciler.rectifyStateFromAddon(addon);
+ }
+ },
+};
+
+Object.setPrototypeOf(AddonsStore.prototype, Store.prototype);
+
+/**
+ * The add-ons tracker keeps track of real-time changes to add-ons.
+ *
+ * It hooks up to the reconciler and receives notifications directly from it.
+ */
+function AddonsTracker(name, engine) {
+ LegacyTracker.call(this, name, engine);
+}
+AddonsTracker.prototype = {
+ get reconciler() {
+ return this.engine._reconciler;
+ },
+
+ get store() {
+ return this.engine._store;
+ },
+
+ /**
+ * This callback is executed whenever the AddonsReconciler sends out a change
+ * notification. See AddonsReconciler.addChangeListener().
+ */
+ async changeListener(date, change, addon) {
+ this._log.debug("changeListener invoked: " + change + " " + addon.id);
+ // Ignore changes that occur during sync.
+ if (this.ignoreAll) {
+ return;
+ }
+
+ if (!(await this.store.isAddonSyncable(addon))) {
+ this._log.debug(
+ "Ignoring change because add-on isn't syncable: " + addon.id
+ );
+ return;
+ }
+
+ const added = await this.addChangedID(addon.guid, date.getTime() / 1000);
+ if (added) {
+ this.score += SCORE_INCREMENT_XLARGE;
+ }
+ },
+
+ onStart() {
+ this.reconciler.startListening();
+ this.reconciler.addChangeListener(this);
+ },
+
+ onStop() {
+ this.reconciler.removeChangeListener(this);
+ this.reconciler.stopListening();
+ },
+};
+
+Object.setPrototypeOf(AddonsTracker.prototype, LegacyTracker.prototype);
+
+export class AddonValidator extends CollectionValidator {
+ constructor(engine = null) {
+ super("addons", "id", ["addonID", "enabled", "applicationID", "source"]);
+ this.engine = engine;
+ }
+
+ async getClientItems() {
+ return lazy.AddonManager.getAllAddons();
+ }
+
+ normalizeClientItem(item) {
+ let enabled = !item.userDisabled;
+ if (item.pendingOperations & lazy.AddonManager.PENDING_ENABLE) {
+ enabled = true;
+ } else if (item.pendingOperations & lazy.AddonManager.PENDING_DISABLE) {
+ enabled = false;
+ }
+ return {
+ enabled,
+ id: item.syncGUID,
+ addonID: item.id,
+ applicationID: Services.appinfo.ID,
+ source: "amo", // check item.foreignInstall?
+ original: item,
+ };
+ }
+
+ async normalizeServerItem(item) {
+ let guid = await this.engine._findDupe(item);
+ if (guid) {
+ item.id = guid;
+ }
+ return item;
+ }
+
+ clientUnderstands(item) {
+ return item.applicationID === Services.appinfo.ID;
+ }
+
+ async syncedByClient(item) {
+ return (
+ !item.original.hidden &&
+ !item.original.isSystem &&
+ !(
+ item.original.pendingOperations & lazy.AddonManager.PENDING_UNINSTALL
+ ) &&
+      // No need to await the returned promise explicitly:
+ // |expr1 && expr2| evaluates to expr2 if expr1 is true.
+ this.engine.isAddonSyncable(item.original, true)
+ );
+ }
+}
diff --git a/services/sync/modules/engines/bookmarks.sys.mjs b/services/sync/modules/engines/bookmarks.sys.mjs
new file mode 100644
index 0000000000..3c1396f67d
--- /dev/null
+++ b/services/sync/modules/engines/bookmarks.sys.mjs
@@ -0,0 +1,950 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { SCORE_INCREMENT_XLARGE } from "resource://services-sync/constants.sys.mjs";
+import {
+ Changeset,
+ Store,
+ SyncEngine,
+ Tracker,
+} from "resource://services-sync/engines.sys.mjs";
+import { CryptoWrapper } from "resource://services-sync/record.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ Async: "resource://services-common/async.sys.mjs",
+ Observers: "resource://services-common/observers.sys.mjs",
+ PlacesBackups: "resource://gre/modules/PlacesBackups.sys.mjs",
+ PlacesDBUtils: "resource://gre/modules/PlacesDBUtils.sys.mjs",
+ PlacesSyncUtils: "resource://gre/modules/PlacesSyncUtils.sys.mjs",
+ PlacesUtils: "resource://gre/modules/PlacesUtils.sys.mjs",
+ Resource: "resource://services-sync/resource.sys.mjs",
+ SyncedBookmarksMirror: "resource://gre/modules/SyncedBookmarksMirror.sys.mjs",
+});
+
+const PLACES_MAINTENANCE_INTERVAL_SECONDS = 4 * 60 * 60; // 4 hours.
+
+const FOLDER_SORTINDEX = 1000000;
+
+// Roots that should be deleted from the server, instead of applied locally.
+// This matches `AndroidBrowserBookmarksRepositorySession::forbiddenGUID`,
+// but allows tags because we don't want to reparent tag folders or tag items
+// to "unfiled".
+const FORBIDDEN_INCOMING_IDS = ["pinned", "places", "readinglist"];
+
+// Items with these parents should be deleted from the server. We allow
+// children of the Places root, to avoid orphaning left pane queries and other
+// descendants of custom roots.
+const FORBIDDEN_INCOMING_PARENT_IDS = ["pinned", "readinglist"];
+
+// The tracker ignores changes made by import and restore, to avoid bumping the
+// score and triggering syncs during the process, as well as changes made by
+// Sync.
+ChromeUtils.defineLazyGetter(lazy, "IGNORED_SOURCES", () => [
+ lazy.PlacesUtils.bookmarks.SOURCES.SYNC,
+ lazy.PlacesUtils.bookmarks.SOURCES.IMPORT,
+ lazy.PlacesUtils.bookmarks.SOURCES.RESTORE,
+ lazy.PlacesUtils.bookmarks.SOURCES.RESTORE_ON_STARTUP,
+ lazy.PlacesUtils.bookmarks.SOURCES.SYNC_REPARENT_REMOVED_FOLDER_CHILDREN,
+]);
+
+// The validation telemetry version for the engine. Version 1 is collected
+// by `bookmark_validator.js`, and checks value as well as structure
+// differences. Version 2 is collected by the engine as part of building the
+// remote tree, and checks structure differences only.
+const BOOKMARK_VALIDATOR_VERSION = 2;
+
+// The maximum time that the engine should wait before aborting a bookmark
+// merge.
+const BOOKMARK_APPLY_TIMEOUT_MS = 5 * 60 * 1000; // 5 minutes
+
+// The default frecency value to use when not known.
+const FRECENCY_UNKNOWN = -1;
+
+// Returns the constructor for a bookmark record type.
+function getTypeObject(type) {
+ switch (type) {
+ case "bookmark":
+ return Bookmark;
+ case "query":
+ return BookmarkQuery;
+ case "folder":
+ return BookmarkFolder;
+ case "livemark":
+ return Livemark;
+ case "separator":
+ return BookmarkSeparator;
+ case "item":
+ return PlacesItem;
+ }
+ return null;
+}
+
+export function PlacesItem(collection, id, type) {
+ CryptoWrapper.call(this, collection, id);
+ this.type = type || "item";
+}
+
+PlacesItem.prototype = {
+ async decrypt(keyBundle) {
+ // Do the normal CryptoWrapper decrypt, but change types before returning
+ let clear = await CryptoWrapper.prototype.decrypt.call(this, keyBundle);
+
+ // Convert the abstract places item to the actual object type
+ if (!this.deleted) {
+ Object.setPrototypeOf(this, this.getTypeObject(this.type).prototype);
+ }
+
+ return clear;
+ },
+
+ getTypeObject: function PlacesItem_getTypeObject(type) {
+ let recordObj = getTypeObject(type);
+ if (!recordObj) {
+ throw new Error("Unknown places item object type: " + type);
+ }
+ return recordObj;
+ },
+
+ _logName: "Sync.Record.PlacesItem",
+
+ // Converts the record to a Sync bookmark object that can be passed to
+ // `PlacesSyncUtils.bookmarks.{insert, update}`.
+ toSyncBookmark() {
+ let result = {
+ kind: this.type,
+ recordId: this.id,
+ parentRecordId: this.parentid,
+ };
+ let dateAdded = lazy.PlacesSyncUtils.bookmarks.ratchetTimestampBackwards(
+ this.dateAdded,
+ +this.modified * 1000
+ );
+ if (dateAdded > 0) {
+ result.dateAdded = dateAdded;
+ }
+ return result;
+ },
+
+ // Populates the record from a Sync bookmark object returned from
+ // `PlacesSyncUtils.bookmarks.fetch`.
+ fromSyncBookmark(item) {
+ this.parentid = item.parentRecordId;
+ this.parentName = item.parentTitle;
+ if (item.dateAdded) {
+ this.dateAdded = item.dateAdded;
+ }
+ },
+};
+
+Object.setPrototypeOf(PlacesItem.prototype, CryptoWrapper.prototype);
+
+Utils.deferGetSet(PlacesItem, "cleartext", [
+ "hasDupe",
+ "parentid",
+ "parentName",
+ "type",
+ "dateAdded",
+]);
+
+export function Bookmark(collection, id, type) {
+ PlacesItem.call(this, collection, id, type || "bookmark");
+}
+
+Bookmark.prototype = {
+ _logName: "Sync.Record.Bookmark",
+
+ toSyncBookmark() {
+ let info = PlacesItem.prototype.toSyncBookmark.call(this);
+ info.title = this.title;
+ info.url = this.bmkUri;
+ info.description = this.description;
+ info.tags = this.tags;
+ info.keyword = this.keyword;
+ return info;
+ },
+
+ fromSyncBookmark(item) {
+ PlacesItem.prototype.fromSyncBookmark.call(this, item);
+ this.title = item.title;
+ this.bmkUri = item.url.href;
+ this.description = item.description;
+ this.tags = item.tags;
+ this.keyword = item.keyword;
+ },
+};
+
+Object.setPrototypeOf(Bookmark.prototype, PlacesItem.prototype);
+
+Utils.deferGetSet(Bookmark, "cleartext", [
+ "title",
+ "bmkUri",
+ "description",
+ "tags",
+ "keyword",
+]);
+
+export function BookmarkQuery(collection, id) {
+ Bookmark.call(this, collection, id, "query");
+}
+
+BookmarkQuery.prototype = {
+ _logName: "Sync.Record.BookmarkQuery",
+
+ toSyncBookmark() {
+ let info = Bookmark.prototype.toSyncBookmark.call(this);
+ info.folder = this.folderName || undefined; // empty string -> undefined
+ info.query = this.queryId;
+ return info;
+ },
+
+ fromSyncBookmark(item) {
+ Bookmark.prototype.fromSyncBookmark.call(this, item);
+ this.folderName = item.folder || undefined; // empty string -> undefined
+ this.queryId = item.query;
+ },
+};
+
+Object.setPrototypeOf(BookmarkQuery.prototype, Bookmark.prototype);
+
+Utils.deferGetSet(BookmarkQuery, "cleartext", ["folderName", "queryId"]);
+
+export function BookmarkFolder(collection, id, type) {
+ PlacesItem.call(this, collection, id, type || "folder");
+}
+
+BookmarkFolder.prototype = {
+ _logName: "Sync.Record.Folder",
+
+ toSyncBookmark() {
+ let info = PlacesItem.prototype.toSyncBookmark.call(this);
+ info.description = this.description;
+ info.title = this.title;
+ return info;
+ },
+
+ fromSyncBookmark(item) {
+ PlacesItem.prototype.fromSyncBookmark.call(this, item);
+ this.title = item.title;
+ this.description = item.description;
+ this.children = item.childRecordIds;
+ },
+};
+
+Object.setPrototypeOf(BookmarkFolder.prototype, PlacesItem.prototype);
+
+Utils.deferGetSet(BookmarkFolder, "cleartext", [
+ "description",
+ "title",
+ "children",
+]);
+
+export function Livemark(collection, id) {
+ BookmarkFolder.call(this, collection, id, "livemark");
+}
+
+Livemark.prototype = {
+ _logName: "Sync.Record.Livemark",
+
+ toSyncBookmark() {
+ let info = BookmarkFolder.prototype.toSyncBookmark.call(this);
+ info.feed = this.feedUri;
+ info.site = this.siteUri;
+ return info;
+ },
+
+ fromSyncBookmark(item) {
+ BookmarkFolder.prototype.fromSyncBookmark.call(this, item);
+ this.feedUri = item.feed.href;
+ if (item.site) {
+ this.siteUri = item.site.href;
+ }
+ },
+};
+
+Object.setPrototypeOf(Livemark.prototype, BookmarkFolder.prototype);
+
+Utils.deferGetSet(Livemark, "cleartext", ["siteUri", "feedUri"]);
+
+export function BookmarkSeparator(collection, id) {
+ PlacesItem.call(this, collection, id, "separator");
+}
+
+BookmarkSeparator.prototype = {
+ _logName: "Sync.Record.Separator",
+
+ fromSyncBookmark(item) {
+ PlacesItem.prototype.fromSyncBookmark.call(this, item);
+ this.pos = item.index;
+ },
+};
+
+Object.setPrototypeOf(BookmarkSeparator.prototype, PlacesItem.prototype);
+
+Utils.deferGetSet(BookmarkSeparator, "cleartext", "pos");
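+
+// For illustration: decrypting an incoming record whose cleartext `type` is
+// "folder" rebinds its prototype to BookmarkFolder (see
+// PlacesItem.prototype.decrypt above), so toSyncBookmark() picks up
+// folder-specific fields such as `title` and `description`.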
+
+/**
+ * The bookmarks engine uses a different store that stages downloaded bookmarks
+ * in a separate database, instead of writing directly to Places. The buffer
+ * handles reconciliation, so we stub out `_reconcile`, and wait to pull changes
+ * until we're ready to upload.
+ */
+export function BookmarksEngine(service) {
+ SyncEngine.call(this, "Bookmarks", service);
+}
+
+BookmarksEngine.prototype = {
+ _recordObj: PlacesItem,
+ _trackerObj: BookmarksTracker,
+ _storeObj: BookmarksStore,
+ version: 2,
+ // Used to override the engine name in telemetry, so that we can distinguish
+ // this engine from the old, now removed non-buffered engine.
+ overrideTelemetryName: "bookmarks-buffered",
+
+ // Needed to ensure we don't miss items when resuming a sync that failed or
+ // aborted early.
+ _defaultSort: "oldest",
+
+ syncPriority: 4,
+ allowSkippedRecord: false,
+
+ async _ensureCurrentSyncID(newSyncID) {
+ await lazy.PlacesSyncUtils.bookmarks.ensureCurrentSyncId(newSyncID);
+ let buf = await this._store.ensureOpenMirror();
+ await buf.ensureCurrentSyncId(newSyncID);
+ },
+
+ async ensureCurrentSyncID(newSyncID) {
+ let shouldWipeRemote =
+ await lazy.PlacesSyncUtils.bookmarks.shouldWipeRemote();
+ if (!shouldWipeRemote) {
+ this._log.debug(
+ "Checking if server sync ID ${newSyncID} matches existing",
+ { newSyncID }
+ );
+ await this._ensureCurrentSyncID(newSyncID);
+ return newSyncID;
+ }
+ // We didn't take the new sync ID because we need to wipe the server
+ // and other clients after a restore. Send the command, wipe the
+ // server, and reset our sync ID to reupload everything.
+ this._log.debug(
+ "Ignoring server sync ID ${newSyncID} after restore; " +
+ "wiping server and resetting sync ID",
+ { newSyncID }
+ );
+ await this.service.clientsEngine.sendCommand(
+ "wipeEngine",
+ [this.name],
+ null,
+ { reason: "bookmark-restore" }
+ );
+ let assignedSyncID = await this.resetSyncID();
+ return assignedSyncID;
+ },
+
+ async getSyncID() {
+ return lazy.PlacesSyncUtils.bookmarks.getSyncId();
+ },
+
+ async resetSyncID() {
+ await this._deleteServerCollection();
+ return this.resetLocalSyncID();
+ },
+
+ async resetLocalSyncID() {
+ let newSyncID = await lazy.PlacesSyncUtils.bookmarks.resetSyncId();
+ this._log.debug("Assigned new sync ID ${newSyncID}", { newSyncID });
+ let buf = await this._store.ensureOpenMirror();
+ await buf.ensureCurrentSyncId(newSyncID);
+ return newSyncID;
+ },
+
+ async getLastSync() {
+ let mirror = await this._store.ensureOpenMirror();
+ return mirror.getCollectionHighWaterMark();
+ },
+
+ async setLastSync(lastSync) {
+ let mirror = await this._store.ensureOpenMirror();
+ await mirror.setCollectionLastModified(lastSync);
+ // Update the last sync time in Places so that reverting to the original
+ // bookmarks engine doesn't download records we've already applied.
+ await lazy.PlacesSyncUtils.bookmarks.setLastSync(lastSync);
+ },
+
+ async _syncStartup() {
+ await super._syncStartup();
+
+ try {
+ // For first syncs, back up the user's bookmarks.
+ let lastSync = await this.getLastSync();
+ if (!lastSync) {
+ this._log.debug("Bookmarks backup starting");
+ await lazy.PlacesBackups.create(null, true);
+ this._log.debug("Bookmarks backup done");
+ }
+ } catch (ex) {
+ // Failure to create a backup is somewhat bad, but probably not bad
+ // enough to prevent syncing of bookmarks - so just log the error and
+ // continue.
+ this._log.warn(
+ "Error while backing up bookmarks, but continuing with sync",
+ ex
+ );
+ }
+ },
+
+ async _sync() {
+ try {
+ await super._sync();
+ if (this._ranMaintenanceOnLastSync) {
+ // If the last sync failed, we ran maintenance, and this sync succeeded,
+ // maintenance likely fixed the issue.
+ this._ranMaintenanceOnLastSync = false;
+ this.service.recordTelemetryEvent("maintenance", "fix", "bookmarks");
+ }
+ } catch (ex) {
+ if (
+ lazy.Async.isShutdownException(ex) ||
+ ex.status > 0 ||
+ ex.name == "InterruptedError"
+ ) {
+ // Don't run maintenance on shutdown or HTTP errors, or if we aborted
+ // the sync because the user changed their bookmarks during merging.
+ throw ex;
+ }
+ if (ex.name == "MergeConflictError") {
+ this._log.warn(
+ "Bookmark syncing ran into a merge conflict error...will retry later"
+ );
+ return;
+ }
+ // Run Places maintenance periodically to try to recover from corruption
+ // that might have caused the sync to fail. We cap the interval because
+ // persistent failures likely indicate a problem that won't be fixed by
+ // running maintenance after every failed sync.
+ let elapsedSinceMaintenance =
+ Date.now() / 1000 -
+ Services.prefs.getIntPref("places.database.lastMaintenance", 0);
+ if (elapsedSinceMaintenance >= PLACES_MAINTENANCE_INTERVAL_SECONDS) {
+ this._log.error(
+ "Bookmark sync failed, ${elapsedSinceMaintenance}s " +
+ "elapsed since last run; running Places maintenance",
+ { elapsedSinceMaintenance }
+ );
+ await lazy.PlacesDBUtils.maintenanceOnIdle();
+ this._ranMaintenanceOnLastSync = true;
+ this.service.recordTelemetryEvent("maintenance", "run", "bookmarks");
+ } else {
+ this._ranMaintenanceOnLastSync = false;
+ }
+ throw ex;
+ }
+ },
+
+ async _syncFinish() {
+ await SyncEngine.prototype._syncFinish.call(this);
+ await lazy.PlacesSyncUtils.bookmarks.ensureMobileQuery();
+ },
+
+ async pullAllChanges() {
+ return this.pullNewChanges();
+ },
+
+ async trackRemainingChanges() {
+ let changes = this._modified.changes;
+ await lazy.PlacesSyncUtils.bookmarks.pushChanges(changes);
+ },
+
+ _deleteId(id) {
+ this._noteDeletedId(id);
+ },
+
+ // The bookmarks engine rarely calls this method directly, except in tests or
+ // when handling a `reset{All, Engine}` command from another client. We
+ // usually reset local Sync metadata on a sync ID mismatch, which both engines
+ // override with logic that lives in Places and the mirror.
+ async _resetClient() {
+ await super._resetClient();
+ await lazy.PlacesSyncUtils.bookmarks.reset();
+ let buf = await this._store.ensureOpenMirror();
+ await buf.reset();
+ },
+
+ // Cleans up the Places root, reading list items (ignored in bug 762118,
+ // removed in bug 1155684), and pinned sites.
+ _shouldDeleteRemotely(incomingItem) {
+ return (
+ FORBIDDEN_INCOMING_IDS.includes(incomingItem.id) ||
+ FORBIDDEN_INCOMING_PARENT_IDS.includes(incomingItem.parentid)
+ );
+ },
+
+ emptyChangeset() {
+ return new BookmarksChangeset();
+ },
+
+ async _apply() {
+ let buf = await this._store.ensureOpenMirror();
+ let watchdog = this._newWatchdog();
+ watchdog.start(BOOKMARK_APPLY_TIMEOUT_MS);
+
+ try {
+ let recordsToUpload = await buf.apply({
+ remoteTimeSeconds: lazy.Resource.serverTime,
+ signal: watchdog.signal,
+ });
+ this._modified.replace(recordsToUpload);
+ } finally {
+ watchdog.stop();
+ if (watchdog.abortReason) {
+ this._log.warn(`Aborting bookmark merge: ${watchdog.abortReason}`);
+ }
+ }
+ },
+
+ async _processIncoming(newitems) {
+ await super._processIncoming(newitems);
+ await this._apply();
+ },
+
+ async _reconcile(item) {
+ return true;
+ },
+
+ async _createRecord(id) {
+ let record = await this._doCreateRecord(id);
+ if (!record.deleted) {
+ // Set hasDupe on all (non-deleted) records since we don't use it and we
+ // want to minimize the risk of older clients corrupting records. Note
+ // that the SyncedBookmarksMirror sets it for all records that it created,
+ // but we would like to ensure that weakly uploaded records are marked as
+ // hasDupe as well.
+ record.hasDupe = true;
+ }
+ return record;
+ },
+
+ async _doCreateRecord(id) {
+ let change = this._modified.changes[id];
+ if (!change) {
+ this._log.error(
+ "Creating record for item ${id} not in strong changeset",
+ { id }
+ );
+ throw new TypeError("Can't create record for unchanged item");
+ }
+ let record = this._recordFromCleartext(id, change.cleartext);
+ record.sortindex = await this._store._calculateIndex(record);
+ return record;
+ },
+
+ _recordFromCleartext(id, cleartext) {
+ let recordObj = getTypeObject(cleartext.type);
+ if (!recordObj) {
+ this._log.warn(
+ "Creating record for item ${id} with unknown type ${type}",
+ { id, type: cleartext.type }
+ );
+ recordObj = PlacesItem;
+ }
+ let record = new recordObj(this.name, id);
+ record.cleartext = cleartext;
+ return record;
+ },
+
+ async pullChanges() {
+ return {};
+ },
+
+ /**
+ * Writes successfully uploaded records back to the mirror, so that the
+ * mirror matches the server. We update the mirror before updating Places,
+ * which has implications for interrupted syncs.
+ *
+ * 1. Sync interrupted during upload; server doesn't support atomic uploads.
+ * We'll download and reapply everything that we uploaded before the
+ * interruption. All locally changed items retain their change counters.
+ * 2. Sync interrupted during upload; atomic uploads enabled. The server
+ * discards the batch. All changed local items retain their change
+ * counters, so the next sync resumes cleanly.
+ * 3. Sync interrupted during upload; outgoing records can't fit in a single
+ * batch. We'll download and reapply all records through the most recent
+ * committed batch. This is a variation of (1).
+ * 4. Sync interrupted after we update the mirror, but before cleanup. The
+ * mirror matches the server, but locally changed items retain their change
+ * counters. Reuploading them on the next sync should be idempotent, though
+ * unnecessary. If another client makes a conflicting remote change before
+ * we sync again, we may incorrectly prefer the local state.
+ * 5. Sync completes successfully. We'll update the mirror, and reset the
+ * change counters for all items.
+ */
+ async _onRecordsWritten(succeeded, failed, serverModifiedTime) {
+ let records = [];
+ for (let id of succeeded) {
+ let change = this._modified.changes[id];
+ if (!change) {
+ // TODO (Bug 1433178): Write weakly uploaded records back to the mirror.
+ this._log.info("Uploaded record not in strong changeset", id);
+ continue;
+ }
+ if (!change.synced) {
+ this._log.info("Record in strong changeset not uploaded", id);
+ continue;
+ }
+ let cleartext = change.cleartext;
+ if (!cleartext) {
+ this._log.error(
+ "Missing Sync record cleartext for ${id} in ${change}",
+ { id, change }
+ );
+ throw new TypeError("Missing cleartext for uploaded Sync record");
+ }
+ let record = this._recordFromCleartext(id, cleartext);
+ record.modified = serverModifiedTime;
+ records.push(record);
+ }
+ let buf = await this._store.ensureOpenMirror();
+ await buf.store(records, { needsMerge: false });
+ },
+
+ async finalize() {
+ await super.finalize();
+ await this._store.finalize();
+ },
+};
+
+Object.setPrototypeOf(BookmarksEngine.prototype, SyncEngine.prototype);
+
+/**
+ * The bookmarks store delegates to the mirror for staging and applying
+ * records. Most `Store` methods intentionally remain abstract, so you can't use
+ * this store to create or update bookmarks in Places. All changes must go
+ * through the mirror, which takes care of merging and producing a valid tree.
+ */
+function BookmarksStore(name, engine) {
+ Store.call(this, name, engine);
+}
+
+BookmarksStore.prototype = {
+ _openMirrorPromise: null,
+
+ // For tests.
+ _batchChunkSize: 500,
+
+  // Create a record starting from the Weave ID (the Places GUID).
+ async createRecord(id, collection) {
+ let item = await lazy.PlacesSyncUtils.bookmarks.fetch(id);
+ if (!item) {
+ // deleted item
+ let record = new PlacesItem(collection, id);
+ record.deleted = true;
+ return record;
+ }
+
+ let recordObj = getTypeObject(item.kind);
+ if (!recordObj) {
+ this._log.warn("Unknown item type, cannot serialize: " + item.kind);
+ recordObj = PlacesItem;
+ }
+ let record = new recordObj(collection, id);
+ record.fromSyncBookmark(item);
+
+ record.sortindex = await this._calculateIndex(record);
+
+ return record;
+ },
+
+ async _calculateIndex(record) {
+ // Ensure folders have a very high sort index so they're not synced last.
+ if (record.type == "folder") {
+ return FOLDER_SORTINDEX;
+ }
+
+    // For anything directly under the toolbar, give it a boost greater than
+    // that of an unvisited bookmark.
+ let index = 0;
+ if (record.parentid == "toolbar") {
+ index += 150;
+ }
+
+ // Add in the bookmark's frecency if we have something.
+ if (record.bmkUri != null) {
+ let frecency = FRECENCY_UNKNOWN;
+ try {
+ frecency = await lazy.PlacesSyncUtils.history.fetchURLFrecency(
+ record.bmkUri
+ );
+ } catch (ex) {
+ this._log.warn(
+ `Failed to fetch frecency for ${record.id}; assuming default`,
+ ex
+ );
+ this._log.trace("Record {id} has invalid URL ${bmkUri}", record);
+ }
+ if (frecency != FRECENCY_UNKNOWN) {
+ index += frecency;
+ }
+ }
+
+ return index;
+ },
+
+ async wipe() {
+ // Save a backup before clearing out all bookmarks.
+ await lazy.PlacesBackups.create(null, true);
+ await lazy.PlacesSyncUtils.bookmarks.wipe();
+ },
+
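+  // Memoizes the open-mirror promise so that concurrent callers share a
+  // single open attempt; on failure the promise is cleared so the next
+  // caller retries.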
+ ensureOpenMirror() {
+ if (!this._openMirrorPromise) {
+ this._openMirrorPromise = this._openMirror().catch(err => {
+ // We may have failed to open the mirror temporarily; for example, if
+ // the database is locked. Clear the promise so that subsequent
+ // `ensureOpenMirror` calls can try to open the mirror again.
+ this._openMirrorPromise = null;
+ throw err;
+ });
+ }
+ return this._openMirrorPromise;
+ },
+
+ async _openMirror() {
+ let mirrorPath = PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ "bookmarks.sqlite"
+ );
+ await IOUtils.makeDirectory(PathUtils.parent(mirrorPath), {
+ createAncestors: true,
+ });
+
+ return lazy.SyncedBookmarksMirror.open({
+ path: mirrorPath,
+ recordStepTelemetry: (name, took, counts) => {
+ lazy.Observers.notify(
+ "weave:engine:sync:step",
+ {
+ name,
+ took,
+ counts,
+ },
+ this.name
+ );
+ },
+ recordValidationTelemetry: (took, checked, problems) => {
+ lazy.Observers.notify(
+ "weave:engine:validate:finish",
+ {
+ version: BOOKMARK_VALIDATOR_VERSION,
+ took,
+ checked,
+ problems,
+ },
+ this.name
+ );
+ },
+ });
+ },
+
+ async applyIncomingBatch(records, countTelemetry) {
+ let buf = await this.ensureOpenMirror();
+ for (let chunk of lazy.PlacesUtils.chunkArray(
+ records,
+ this._batchChunkSize
+ )) {
+ await buf.store(chunk);
+ }
+ // Array of failed records.
+ return [];
+ },
+
+ async applyIncoming(record) {
+ let buf = await this.ensureOpenMirror();
+ await buf.store([record]);
+ },
+
+ async finalize() {
+ if (!this._openMirrorPromise) {
+ return;
+ }
+ let buf = await this._openMirrorPromise;
+ await buf.finalize();
+ },
+};
+
+Object.setPrototypeOf(BookmarksStore.prototype, Store.prototype);
+
+// The bookmarks tracker is a special flower. Instead of listening for changes
+// via observer notifications, it queries Places for the set of items that have
+// changed since the last sync. Because it's a "pull-based" tracker, it ignores
+// all concepts of "add a changed ID." However, it still registers an observer
+// to bump the score, so that changed bookmarks are synced immediately.
+function BookmarksTracker(name, engine) {
+ Tracker.call(this, name, engine);
+}
+BookmarksTracker.prototype = {
+ onStart() {
+ this._placesListener = new PlacesWeakCallbackWrapper(
+ this.handlePlacesEvents.bind(this)
+ );
+ lazy.PlacesUtils.observers.addListener(
+ [
+ "bookmark-added",
+ "bookmark-removed",
+ "bookmark-moved",
+ "bookmark-guid-changed",
+ "bookmark-keyword-changed",
+ "bookmark-tags-changed",
+ "bookmark-time-changed",
+ "bookmark-title-changed",
+ "bookmark-url-changed",
+ ],
+ this._placesListener
+ );
+ Svc.Obs.add("bookmarks-restore-begin", this);
+ Svc.Obs.add("bookmarks-restore-success", this);
+ Svc.Obs.add("bookmarks-restore-failed", this);
+ },
+
+ onStop() {
+ lazy.PlacesUtils.observers.removeListener(
+ [
+ "bookmark-added",
+ "bookmark-removed",
+ "bookmark-moved",
+ "bookmark-guid-changed",
+ "bookmark-keyword-changed",
+ "bookmark-tags-changed",
+ "bookmark-time-changed",
+ "bookmark-title-changed",
+ "bookmark-url-changed",
+ ],
+ this._placesListener
+ );
+ Svc.Obs.remove("bookmarks-restore-begin", this);
+ Svc.Obs.remove("bookmarks-restore-success", this);
+ Svc.Obs.remove("bookmarks-restore-failed", this);
+ },
+
+ async getChangedIDs() {
+ return lazy.PlacesSyncUtils.bookmarks.pullChanges();
+ },
+
+ observe(subject, topic, data) {
+ switch (topic) {
+ case "bookmarks-restore-begin":
+ this._log.debug("Ignoring changes from importing bookmarks.");
+ break;
+ case "bookmarks-restore-success":
+ this._log.debug("Tracking all items on successful import.");
+
+ if (data == "json") {
+ this._log.debug(
+ "Restore succeeded: wiping server and other clients."
+ );
+ // Trigger an immediate sync. `ensureCurrentSyncID` will notice we
+ // restored, wipe the server and other clients, reset the sync ID, and
+ // upload the restored tree.
+ this.score += SCORE_INCREMENT_XLARGE;
+ } else {
+ // "html", "html-initial", or "json-append"
+ this._log.debug("Import succeeded.");
+ }
+ break;
+ case "bookmarks-restore-failed":
+ this._log.debug("Tracking all items on failed import.");
+ break;
+ }
+ },
+
+ QueryInterface: ChromeUtils.generateQI(["nsISupportsWeakReference"]),
+
+ /* Every add/remove/change will trigger a sync for MULTI_DEVICE */
+ _upScore: function BMT__upScore() {
+ this.score += SCORE_INCREMENT_XLARGE;
+ },
+
+ handlePlacesEvents(events) {
+ for (let event of events) {
+ switch (event.type) {
+ case "bookmark-added":
+ case "bookmark-removed":
+ case "bookmark-moved":
+ case "bookmark-keyword-changed":
+ case "bookmark-tags-changed":
+ case "bookmark-time-changed":
+ case "bookmark-title-changed":
+ case "bookmark-url-changed":
+ if (lazy.IGNORED_SOURCES.includes(event.source)) {
+ continue;
+ }
+
+ this._log.trace(`'${event.type}': ${event.id}`);
+ this._upScore();
+ break;
+ case "bookmark-guid-changed":
+ if (event.source !== lazy.PlacesUtils.bookmarks.SOURCES.SYNC) {
+ this._log.warn(
+ "The source of bookmark-guid-changed event shoud be sync."
+ );
+ continue;
+ }
+
+ this._log.trace(`'${event.type}': ${event.id}`);
+ this._upScore();
+ break;
+ case "purge-caches":
+ this._log.trace("purge-caches");
+ this._upScore();
+ break;
+ }
+ }
+ },
+};
+
+Object.setPrototypeOf(BookmarksTracker.prototype, Tracker.prototype);
+
+/**
+ * A changeset that stores extra metadata in a change record for each ID. The
+ * engine updates this metadata when uploading Sync records, and writes it back
+ * to Places in `BookmarksEngine#trackRemainingChanges`.
+ *
+ * The `synced` property on a change record means its corresponding item has
+ * been uploaded, and we should pretend it doesn't exist in the changeset.
+ */
+class BookmarksChangeset extends Changeset {
+ // Only `_reconcile` calls `getModifiedTimestamp` and `has`, and the engine
+ // does its own reconciliation.
+ getModifiedTimestamp(id) {
+ throw new Error("Don't use timestamps to resolve bookmark conflicts");
+ }
+
+ has(id) {
+ throw new Error("Don't use the changeset to resolve bookmark conflicts");
+ }
+
+ delete(id) {
+ let change = this.changes[id];
+ if (change) {
+ // Mark the change as synced without removing it from the set. We do this
+ // so that we can update Places in `trackRemainingChanges`.
+ change.synced = true;
+ }
+ }
+
+ ids() {
+ let results = new Set();
+ for (let id in this.changes) {
+ if (!this.changes[id].synced) {
+ results.add(id);
+ }
+ }
+ return [...results];
+ }
+}
diff --git a/services/sync/modules/engines/clients.sys.mjs b/services/sync/modules/engines/clients.sys.mjs
new file mode 100644
index 0000000000..eda92bd75b
--- /dev/null
+++ b/services/sync/modules/engines/clients.sys.mjs
@@ -0,0 +1,1122 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * How does the clients engine work?
+ *
+ * - We use 2 files - commands.json and commands-syncing.json.
+ *
+ * - At sync upload time, we attempt a rename of commands.json to
+ *   commands-syncing.json, and ignore errors (this helps if we crashed
+ *   during an earlier sync!).
+ * - We load commands-syncing.json and stash the contents in
+ * _currentlySyncingCommands which lives for the duration of the upload process.
+ * - We use _currentlySyncingCommands to build the outgoing records
+ * - Immediately after successful upload, we delete commands-syncing.json from
+ * disk (and clear _currentlySyncingCommands). We reconcile our local records
+ *   with what we just wrote to the server, and add the commands for any
+ *   failed IDs back to commands.json.
+ * - Any time we need to "save" a command for future syncs, we load
+ * commands.json, update it, and write it back out.
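+ *
+ * For illustration, commands.json might look something like this (the client
+ * GUID and flowID here are made up):
+ *
+ *   {
+ *     "yxa2Kp8EXAMPLE": [
+ *       { "command": "wipeEngine", "args": ["bookmarks"], "flowID": "..." }
+ *     ]
+ *   }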
+ */
+
+import { Async } from "resource://services-common/async.sys.mjs";
+
+import {
+ DEVICE_TYPE_DESKTOP,
+ DEVICE_TYPE_MOBILE,
+ SINGLE_USER_THRESHOLD,
+ SYNC_API_VERSION,
+} from "resource://services-sync/constants.sys.mjs";
+
+import {
+ Store,
+ SyncEngine,
+ LegacyTracker,
+} from "resource://services-sync/engines.sys.mjs";
+import { CryptoWrapper } from "resource://services-sync/record.sys.mjs";
+import { Resource } from "resource://services-sync/resource.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineLazyGetter(lazy, "fxAccounts", () => {
+ return ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+ ).getFxAccountsSingleton();
+});
+
+import { PREF_ACCOUNT_ROOT } from "resource://gre/modules/FxAccountsCommon.sys.mjs";
+
+const CLIENTS_TTL = 15552000; // 180 days
+const CLIENTS_TTL_REFRESH = 604800; // 7 days
+const STALE_CLIENT_REMOTE_AGE = 604800; // 7 days
+
+// TTL of the message sent to another device when sending a tab
+const NOTIFY_TAB_SENT_TTL_SECS = 1 * 3600; // 1 hour
+
+// How often we force a refresh of the FxA device list.
+const REFRESH_FXA_DEVICE_INTERVAL_MS = 2 * 60 * 60 * 1000; // 2 hours
+
+// Reasons behind sending collection_changed push notifications.
+const COLLECTION_MODIFIED_REASON_SENDTAB = "sendtab";
+const COLLECTION_MODIFIED_REASON_FIRSTSYNC = "firstsync";
+
+const SUPPORTED_PROTOCOL_VERSIONS = [SYNC_API_VERSION];
+const LAST_MODIFIED_ON_PROCESS_COMMAND_PREF =
+ "services.sync.clients.lastModifiedOnProcessCommands";
+
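+// Whether `action` already appears in `commands` - i.e., another entry with
+// the same command name and deep-equal args. Note that `flowID` is not
+// compared, so resends of the same command count as duplicates.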
+function hasDupeCommand(commands, action) {
+ if (!commands) {
+ return false;
+ }
+ return commands.some(
+ other =>
+ other.command == action.command &&
+ Utils.deepEquals(other.args, action.args)
+ );
+}
+
+export function ClientsRec(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+
+ClientsRec.prototype = {
+ _logName: "Sync.Record.Clients",
+ ttl: CLIENTS_TTL,
+};
+Object.setPrototypeOf(ClientsRec.prototype, CryptoWrapper.prototype);
+
+Utils.deferGetSet(ClientsRec, "cleartext", [
+ "name",
+ "type",
+ "commands",
+ "version",
+ "protocols",
+ "formfactor",
+ "os",
+ "appPackage",
+ "application",
+ "device",
+ "fxaDeviceId",
+]);
+
+export function ClientEngine(service) {
+ SyncEngine.call(this, "Clients", service);
+
+ this.fxAccounts = lazy.fxAccounts;
+ this.addClientCommandQueue = Async.asyncQueueCaller(this._log);
+ Utils.defineLazyIDProperty(this, "localID", "services.sync.client.GUID");
+}
+
+ClientEngine.prototype = {
+ _storeObj: ClientStore,
+ _recordObj: ClientsRec,
+ _trackerObj: ClientsTracker,
+ allowSkippedRecord: false,
+ _knownStaleFxADeviceIds: null,
+ _lastDeviceCounts: null,
+ _lastFxaDeviceRefresh: 0,
+
+ async initialize() {
+ // Reset the last sync timestamp on every startup so that we fetch all clients
+ await this.resetLastSync();
+ },
+
+ // These two properties allow us to avoid replaying the same commands
+ // continuously if we cannot manage to upload our own record.
+ _localClientLastModified: 0,
+ get _lastModifiedOnProcessCommands() {
+ return Services.prefs.getIntPref(LAST_MODIFIED_ON_PROCESS_COMMAND_PREF, -1);
+ },
+
+ set _lastModifiedOnProcessCommands(value) {
+ Services.prefs.setIntPref(LAST_MODIFIED_ON_PROCESS_COMMAND_PREF, value);
+ },
+
+ get isFirstSync() {
+ return !this.lastRecordUpload;
+ },
+
+ // Always sync client data as it controls other sync behavior
+ get enabled() {
+ return true;
+ },
+
+ get lastRecordUpload() {
+ return Svc.PrefBranch.getIntPref(this.name + ".lastRecordUpload", 0);
+ },
+ set lastRecordUpload(value) {
+ Svc.PrefBranch.setIntPref(
+ this.name + ".lastRecordUpload",
+ Math.floor(value)
+ );
+ },
+
+ get remoteClients() {
+ // return all non-stale clients for external consumption.
+ return Object.values(this._store._remoteClients).filter(v => !v.stale);
+ },
+
+ remoteClient(id) {
+ let client = this._store._remoteClients[id];
+ return client && !client.stale ? client : null;
+ },
+
+ remoteClientExists(id) {
+ return !!this.remoteClient(id);
+ },
+
+ // Aggregate some stats on the composition of clients on this account
+ get stats() {
+ let stats = {
+ hasMobile: this.localType == DEVICE_TYPE_MOBILE,
+ names: [this.localName],
+ numClients: 1,
+ };
+
+ for (let id in this._store._remoteClients) {
+ let { name, type, stale } = this._store._remoteClients[id];
+ if (!stale) {
+ stats.hasMobile = stats.hasMobile || type == DEVICE_TYPE_MOBILE;
+ stats.names.push(name);
+ stats.numClients++;
+ }
+ }
+
+ return stats;
+ },
+
+ /**
+ * Obtain information about device types.
+ *
+ * Returns a Map of device types to integer counts. Guaranteed to include
+ * "desktop" (which will have at least 1 - this device) and "mobile" (which
+ * may have zero) counts. It almost certainly will include only these 2.
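+   *
+   * For example, an account with two desktops and one phone would yield a
+   * Map like { "desktop" => 2, "mobile" => 1 }.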
+ */
+ get deviceTypes() {
+ let counts = new Map();
+
+ counts.set(this.localType, 1); // currently this must be DEVICE_TYPE_DESKTOP
+ counts.set(DEVICE_TYPE_MOBILE, 0);
+
+ for (let id in this._store._remoteClients) {
+ let record = this._store._remoteClients[id];
+ if (record.stale) {
+ continue; // pretend "stale" records don't exist.
+ }
+ let type = record.type;
+ if (!counts.has(type)) {
+ counts.set(type, 0);
+ }
+
+ counts.set(type, counts.get(type) + 1);
+ }
+
+ return counts;
+ },
+
+ get brandName() {
+ let brand = Services.strings.createBundle(
+ "chrome://branding/locale/brand.properties"
+ );
+ return brand.GetStringFromName("brandShortName");
+ },
+
+ get localName() {
+ return this.fxAccounts.device.getLocalName();
+ },
+ set localName(value) {
+ this.fxAccounts.device.setLocalName(value);
+ },
+
+ get localType() {
+ return this.fxAccounts.device.getLocalType();
+ },
+
+ getClientName(id) {
+ if (id == this.localID) {
+ return this.localName;
+ }
+ let client = this._store._remoteClients[id];
+ if (!client) {
+ return "";
+ }
+    // Sync clients don't always correctly update the device name; however,
+    // FxA always does, so try to pull the name from there first.
+ let fxaDevice = this.fxAccounts.device.recentDeviceList?.find(
+ device => device.id === client.fxaDeviceId
+ );
+
+    // This should be very rare, but could happen if we have yet to fetch
+    // devices, or if the client recently disconnected.
+ if (!fxaDevice) {
+ this._log.warn(
+ "Couldn't find associated FxA device, falling back to client name"
+ );
+ return client.name;
+ }
+ return fxaDevice.name;
+ },
+
+ getClientFxaDeviceId(id) {
+ if (this._store._remoteClients[id]) {
+ return this._store._remoteClients[id].fxaDeviceId;
+ }
+ return null;
+ },
+
+ getClientByFxaDeviceId(fxaDeviceId) {
+ for (let id in this._store._remoteClients) {
+ let client = this._store._remoteClients[id];
+ if (client.stale) {
+ continue;
+ }
+ if (client.fxaDeviceId == fxaDeviceId) {
+ return client;
+ }
+ }
+ return null;
+ },
+
+ getClientType(id) {
+ const client = this._store._remoteClients[id];
+ if (client.type == DEVICE_TYPE_DESKTOP) {
+ return "desktop";
+ }
+ if (client.formfactor && client.formfactor.includes("tablet")) {
+ return "tablet";
+ }
+ return "phone";
+ },
+
+ async _readCommands() {
+ let commands = await Utils.jsonLoad("commands", this);
+ return commands || {};
+ },
+
+ /**
+ * Low level function, do not use directly (use _addClientCommand instead).
+ */
+ async _saveCommands(commands) {
+ try {
+ await Utils.jsonSave("commands", this, commands);
+ } catch (error) {
+ this._log.error("Failed to save JSON outgoing commands", error);
+ }
+ },
+
+ async _prepareCommandsForUpload() {
+ try {
+ await Utils.jsonMove("commands", "commands-syncing", this);
+ } catch (e) {
+ // Ignore errors
+ }
+ let commands = await Utils.jsonLoad("commands-syncing", this);
+ return commands || {};
+ },
+
+ async _deleteUploadedCommands() {
+ delete this._currentlySyncingCommands;
+ try {
+ await Utils.jsonRemove("commands-syncing", this);
+ } catch (err) {
+ this._log.error("Failed to delete syncing-commands file", err);
+ }
+ },
+
+  // Gets the commands for a client that we have yet to write to the server.
+  // Doesn't include commands for that client which are already on the server.
+ // We should rename this!
+ async getClientCommands(clientId) {
+ const allCommands = await this._readCommands();
+ return allCommands[clientId] || [];
+ },
+
+ async removeLocalCommand(command) {
+ // the implementation of this engine is such that adding a command to
+ // the local client is how commands are deleted! ¯\_(ツ)_/¯
+ await this._addClientCommand(this.localID, command);
+ },
+
+ async _addClientCommand(clientId, command) {
+ this.addClientCommandQueue.enqueueCall(async () => {
+ try {
+ const localCommands = await this._readCommands();
+ const localClientCommands = localCommands[clientId] || [];
+ const remoteClient = this._store._remoteClients[clientId];
+ let remoteClientCommands = [];
+ if (remoteClient && remoteClient.commands) {
+ remoteClientCommands = remoteClient.commands;
+ }
+ const clientCommands = localClientCommands.concat(remoteClientCommands);
+ if (hasDupeCommand(clientCommands, command)) {
+ return false;
+ }
+ localCommands[clientId] = localClientCommands.concat(command);
+ await this._saveCommands(localCommands);
+ return true;
+ } catch (e) {
+ // Failing to save a command should not "break the queue" of pending operations.
+ this._log.error(e);
+ return false;
+ }
+ });
+
+ return this.addClientCommandQueue.promiseCallsComplete();
+ },
+
+ async _removeClientCommands(clientId) {
+ const allCommands = await this._readCommands();
+ delete allCommands[clientId];
+ await this._saveCommands(allCommands);
+ },
+
+ async updateKnownStaleClients() {
+ this._log.debug("Updating the known stale clients");
+ // _fetchFxADevices side effect updates this._knownStaleFxADeviceIds.
+ await this._fetchFxADevices();
+ let localFxADeviceId = await lazy.fxAccounts.device.getLocalId();
+ // Process newer records first, so that if we hit a record with a device ID
+ // we've seen before, we can mark it stale immediately.
+ let clientList = Object.values(this._store._remoteClients).sort(
+ (a, b) => b.serverLastModified - a.serverLastModified
+ );
+ let seenDeviceIds = new Set([localFxADeviceId]);
+ for (let client of clientList) {
+ // Clients might not have an `fxaDeviceId` if they fail the FxA
+ // registration process.
+ if (!client.fxaDeviceId) {
+ continue;
+ }
+ if (this._knownStaleFxADeviceIds.includes(client.fxaDeviceId)) {
+ this._log.info(
+ `Hiding stale client ${client.id} - in known stale clients list`
+ );
+ client.stale = true;
+ } else if (seenDeviceIds.has(client.fxaDeviceId)) {
+ this._log.info(
+ `Hiding stale client ${client.id}` +
+ ` - duplicate device id ${client.fxaDeviceId}`
+ );
+ client.stale = true;
+ } else {
+ seenDeviceIds.add(client.fxaDeviceId);
+ }
+ }
+ },
+
+ async _fetchFxADevices() {
+ // We only force a refresh periodically to keep the load on the servers
+ // down, and because we expect FxA to have received a push message in
+ // most cases when the FxA device list would have changed. For this reason
+ // we still go ahead and check the stale list even if we didn't force a
+ // refresh.
+ let now = this.fxAccounts._internal.now(); // tests mock this .now() impl.
+ if (now - REFRESH_FXA_DEVICE_INTERVAL_MS > this._lastFxaDeviceRefresh) {
+ this._lastFxaDeviceRefresh = now;
+ try {
+ await this.fxAccounts.device.refreshDeviceList();
+ } catch (e) {
+ this._log.error("Could not refresh the FxA device list", e);
+ }
+ }
+
+ // We assume that clients not present in the FxA Device Manager list have been
+ // disconnected and so are stale
+ this._log.debug("Refreshing the known stale clients list");
+ let localClients = Object.values(this._store._remoteClients)
+ .filter(client => client.fxaDeviceId) // iOS client records don't have fxaDeviceId
+ .map(client => client.fxaDeviceId);
+ const fxaClients = this.fxAccounts.device.recentDeviceList
+ ? this.fxAccounts.device.recentDeviceList.map(device => device.id)
+ : [];
+ this._knownStaleFxADeviceIds = Utils.arraySub(localClients, fxaClients);
+ },
+
+ async _syncStartup() {
+ // Reupload new client record periodically.
+ if (Date.now() / 1000 - this.lastRecordUpload > CLIENTS_TTL_REFRESH) {
+ await this._tracker.addChangedID(this.localID);
+ }
+ return SyncEngine.prototype._syncStartup.call(this);
+ },
+
+ async _processIncoming() {
+ // Fetch all records from the server.
+ await this.resetLastSync();
+ this._incomingClients = {};
+ try {
+ await SyncEngine.prototype._processIncoming.call(this);
+ // Update FxA Device list.
+ await this._fetchFxADevices();
+ // Since clients are synced unconditionally, any records in the local store
+ // that don't exist on the server must be for disconnected clients. Remove
+ // them, so that we don't upload records with commands for clients that will
+ // never see them. We also do this to filter out stale clients from the
+ // tabs collection, since showing their list of tabs is confusing.
+ for (let id in this._store._remoteClients) {
+ if (!this._incomingClients[id]) {
+ this._log.info(`Removing local state for deleted client ${id}`);
+ await this._removeRemoteClient(id);
+ }
+ }
+ let localFxADeviceId = await lazy.fxAccounts.device.getLocalId();
+ // Bug 1264498: Mobile clients don't remove themselves from the clients
+ // collection when the user disconnects Sync, so we mark as stale clients
+ // with the same name that haven't synced in over a week.
+ // (Note we can't simply delete them, or we re-apply them next sync - see
+ // bug 1287687)
+ this._localClientLastModified = Math.round(
+ this._incomingClients[this.localID]
+ );
+ delete this._incomingClients[this.localID];
+ let names = new Set([this.localName]);
+ let seenDeviceIds = new Set([localFxADeviceId]);
+ let idToLastModifiedList = Object.entries(this._incomingClients).sort(
+ (a, b) => b[1] - a[1]
+ );
+ for (let [id, serverLastModified] of idToLastModifiedList) {
+ let record = this._store._remoteClients[id];
+ // stash the server last-modified time on the record.
+ record.serverLastModified = serverLastModified;
+ if (
+ record.fxaDeviceId &&
+ this._knownStaleFxADeviceIds.includes(record.fxaDeviceId)
+ ) {
+ this._log.info(
+ `Hiding stale client ${id} - in known stale clients list`
+ );
+ record.stale = true;
+ }
+ if (!names.has(record.name)) {
+ if (record.fxaDeviceId) {
+ seenDeviceIds.add(record.fxaDeviceId);
+ }
+ names.add(record.name);
+ continue;
+ }
+ let remoteAge = Resource.serverTime - this._incomingClients[id];
+ if (remoteAge > STALE_CLIENT_REMOTE_AGE) {
+ this._log.info(`Hiding stale client ${id} with age ${remoteAge}`);
+ record.stale = true;
+ continue;
+ }
+ if (record.fxaDeviceId && seenDeviceIds.has(record.fxaDeviceId)) {
+ this._log.info(
+ `Hiding stale client ${record.id}` +
+ ` - duplicate device id ${record.fxaDeviceId}`
+ );
+ record.stale = true;
+ } else if (record.fxaDeviceId) {
+ seenDeviceIds.add(record.fxaDeviceId);
+ }
+ }
+ } finally {
+ this._incomingClients = null;
+ }
+ },
+
+ async _uploadOutgoing() {
+ this._currentlySyncingCommands = await this._prepareCommandsForUpload();
+ const clientWithPendingCommands = Object.keys(
+ this._currentlySyncingCommands
+ );
+ for (let clientId of clientWithPendingCommands) {
+ if (this._store._remoteClients[clientId] || this.localID == clientId) {
+ this._modified.set(clientId, 0);
+ }
+ }
+ let updatedIDs = this._modified.ids();
+ await SyncEngine.prototype._uploadOutgoing.call(this);
+ // Record the response time as the server time for each item we uploaded.
+ let lastSync = await this.getLastSync();
+ for (let id of updatedIDs) {
+ if (id == this.localID) {
+ this.lastRecordUpload = lastSync;
+ } else {
+ this._store._remoteClients[id].serverLastModified = lastSync;
+ }
+ }
+ },
+
+ async _onRecordsWritten(succeeded, failed) {
+ // Reconcile the status of the local records with what we just wrote on the
+ // server
+ for (let id of succeeded) {
+ const commandChanges = this._currentlySyncingCommands[id];
+ if (id == this.localID) {
+ if (this.isFirstSync) {
+ this._log.info(
+ "Uploaded our client record for the first time, notifying other clients."
+ );
+ this._notifyClientRecordUploaded();
+ }
+ if (this.localCommands) {
+ this.localCommands = this.localCommands.filter(
+ command => !hasDupeCommand(commandChanges, command)
+ );
+ }
+ } else {
+ const clientRecord = this._store._remoteClients[id];
+ if (!commandChanges || !clientRecord) {
+ // should be impossible, else we wouldn't have been writing it.
+ this._log.warn(
+ "No command/No record changes for a client we uploaded"
+ );
+ continue;
+ }
+ // fixup the client record, so our copy of _remoteClients matches what we uploaded.
+ this._store._remoteClients[id] = await this._store.createRecord(id);
+ // we could do better and pass the reference to the record we just uploaded,
+ // but this will do for now
+ }
+ }
+
+ // Re-add failed commands
+ for (let id of failed) {
+ const commandChanges = this._currentlySyncingCommands[id];
+ if (!commandChanges) {
+ continue;
+ }
+ await this._addClientCommand(id, commandChanges);
+ }
+
+ await this._deleteUploadedCommands();
+
+ // Notify other devices that their own client collection changed
+ const idsToNotify = succeeded.reduce((acc, id) => {
+ if (id == this.localID) {
+ return acc;
+ }
+ const fxaDeviceId = this.getClientFxaDeviceId(id);
+ return fxaDeviceId ? acc.concat(fxaDeviceId) : acc;
+ }, []);
+ if (idsToNotify.length) {
+ this._notifyOtherClientsModified(idsToNotify);
+ }
+ },
+
+ _notifyOtherClientsModified(ids) {
+ // We are not waiting on this promise on purpose.
+ this._notifyCollectionChanged(
+ ids,
+ NOTIFY_TAB_SENT_TTL_SECS,
+ COLLECTION_MODIFIED_REASON_SENDTAB
+ );
+ },
+
+ _notifyClientRecordUploaded() {
+ // We are not waiting on this promise on purpose.
+ this._notifyCollectionChanged(
+ null,
+ 0,
+ COLLECTION_MODIFIED_REASON_FIRSTSYNC
+ );
+ },
+
+ /**
+ * @param {?string[]} ids FxA Client IDs to notify. null means everyone else.
+ * @param {number} ttl TTL of the push notification.
+ * @param {string} reason Reason for sending this push notification.
+ */
+ async _notifyCollectionChanged(ids, ttl, reason) {
+ const message = {
+ version: 1,
+ command: "sync:collection_changed",
+ data: {
+ collections: ["clients"],
+ reason,
+ },
+ };
+ let excludedIds = null;
+ if (!ids) {
+ const localFxADeviceId = await lazy.fxAccounts.device.getLocalId();
+ excludedIds = [localFxADeviceId];
+ }
+ try {
+ await this.fxAccounts.notifyDevices(ids, excludedIds, message, ttl);
+ } catch (e) {
+ this._log.error("Could not notify of changes in the collection", e);
+ }
+ },
+
+ async _syncFinish() {
+ // Record histograms for our device types, and also write them to a pref
+ // so non-histogram telemetry (eg, UITelemetry) and the sync scheduler
+    // have easy access to them, and so they are accurate even before we've
+ // successfully synced the first time after startup.
+ let deviceTypeCounts = this.deviceTypes;
+ for (let [deviceType, count] of deviceTypeCounts) {
+ let hid;
+ let prefName = this.name + ".devices.";
+ switch (deviceType) {
+ case DEVICE_TYPE_DESKTOP:
+ hid = "WEAVE_DEVICE_COUNT_DESKTOP";
+ prefName += "desktop";
+ break;
+ case DEVICE_TYPE_MOBILE:
+ hid = "WEAVE_DEVICE_COUNT_MOBILE";
+ prefName += "mobile";
+ break;
+ default:
+ this._log.warn(
+ `Unexpected deviceType "${deviceType}" recording device telemetry.`
+ );
+ continue;
+ }
+ Services.telemetry.getHistogramById(hid).add(count);
+ // Optimization: only write the pref if it changed since our last sync.
+ if (
+ this._lastDeviceCounts == null ||
+ this._lastDeviceCounts.get(prefName) != count
+ ) {
+ Svc.PrefBranch.setIntPref(prefName, count);
+ }
+ }
+ this._lastDeviceCounts = deviceTypeCounts;
+ return SyncEngine.prototype._syncFinish.call(this);
+ },
+
+ async _reconcile(item) {
+ // Every incoming record is reconciled, so we use this to track the
+ // contents of the collection on the server.
+ this._incomingClients[item.id] = item.modified;
+
+ if (!(await this._store.itemExists(item.id))) {
+ return true;
+ }
+ // Clients are synced unconditionally, so we'll always have new records.
+ // Unfortunately, this will cause the scheduler to use the immediate sync
+ // interval for the multi-device case, instead of the active interval. We
+ // work around this by updating the record during reconciliation, and
+ // returning false to indicate that the record doesn't need to be applied
+ // later.
+ await this._store.update(item);
+ return false;
+ },
+
+ // Treat reset the same as wiping for locally cached clients
+ async _resetClient() {
+ await this._wipeClient();
+ },
+
+ async _wipeClient() {
+ await SyncEngine.prototype._resetClient.call(this);
+ this._knownStaleFxADeviceIds = null;
+ delete this.localCommands;
+ await this._store.wipe();
+ try {
+ await Utils.jsonRemove("commands", this);
+ } catch (err) {
+ this._log.warn("Could not delete commands.json", err);
+ }
+ try {
+ await Utils.jsonRemove("commands-syncing", this);
+ } catch (err) {
+ this._log.warn("Could not delete commands-syncing.json", err);
+ }
+ },
+
+ async removeClientData() {
+ let res = this.service.resource(this.engineURL + "/" + this.localID);
+ await res.delete();
+ },
+
+ // Override the default behavior to delete bad records from the server.
+ async handleHMACMismatch(item, mayRetry) {
+ this._log.debug("Handling HMAC mismatch for " + item.id);
+
+ let base = await SyncEngine.prototype.handleHMACMismatch.call(
+ this,
+ item,
+ mayRetry
+ );
+ if (base != SyncEngine.kRecoveryStrategy.error) {
+ return base;
+ }
+
+ // It's a bad client record. Save it to be deleted at the end of the sync.
+ this._log.debug("Bad client record detected. Scheduling for deletion.");
+ await this._deleteId(item.id);
+
+ // Neither try again nor error; we're going to delete it.
+ return SyncEngine.kRecoveryStrategy.ignore;
+ },
+
+ /**
+ * A hash of valid commands that the client knows about. The key is a command
+ * and the value is a hash containing information about the command such as
+ * number of arguments, description, and importance (lower importance numbers
+   * indicate higher importance).
+ */
+ _commands: {
+ resetAll: {
+ args: 0,
+ importance: 0,
+ desc: "Clear temporary local data for all engines",
+ },
+ resetEngine: {
+ args: 1,
+ importance: 0,
+ desc: "Clear temporary local data for engine",
+ },
+ wipeEngine: {
+ args: 1,
+ importance: 0,
+ desc: "Delete all client data for engine",
+ },
+ logout: { args: 0, importance: 0, desc: "Log out client" },
+ },
+
+ /**
+ * Sends a command+args pair to a specific client.
+ *
+ * @param command Command string
+ * @param args Array of arguments/data for command
+ * @param clientId Client to send command to
+ */
+ async _sendCommandToClient(command, args, clientId, telemetryExtra) {
+ this._log.trace("Sending " + command + " to " + clientId);
+
+ let client = this._store._remoteClients[clientId];
+ if (!client) {
+ throw new Error("Unknown remote client ID: '" + clientId + "'.");
+ }
+ if (client.stale) {
+ throw new Error("Stale remote client ID: '" + clientId + "'.");
+ }
+
+ let action = {
+ command,
+ args,
+ // We send the flowID to the other client so *it* can report it in its
+ // telemetry - we record it in ours below.
+ flowID: telemetryExtra.flowID,
+ };
+
+ if (await this._addClientCommand(clientId, action)) {
+ this._log.trace(`Client ${clientId} got a new action`, [command, args]);
+ await this._tracker.addChangedID(clientId);
+ try {
+ telemetryExtra.deviceID =
+ this.service.identity.hashedDeviceID(clientId);
+ } catch (_) {}
+
+ this.service.recordTelemetryEvent(
+ "sendcommand",
+ command,
+ undefined,
+ telemetryExtra
+ );
+ } else {
+ this._log.trace(`Client ${clientId} got a duplicate action`, [
+ command,
+ args,
+ ]);
+ }
+ },
+
+ /**
+ * Check if the local client has any remote commands and perform them.
+ *
+ * @return false to abort sync
+ */
+ async processIncomingCommands() {
+ return this._notify("clients:process-commands", "", async function () {
+ if (
+ !this.localCommands ||
+ (this._lastModifiedOnProcessCommands == this._localClientLastModified &&
+ !this.ignoreLastModifiedOnProcessCommands)
+ ) {
+ return true;
+ }
+ this._lastModifiedOnProcessCommands = this._localClientLastModified;
+
+      const clearedCommands = (await this._readCommands())[this.localID];
+ const commands = this.localCommands.filter(
+ command => !hasDupeCommand(clearedCommands, command)
+ );
+ let didRemoveCommand = false;
+ // Process each command in order.
+ for (let rawCommand of commands) {
+ let shouldRemoveCommand = true; // most commands are auto-removed.
+ let { command, args, flowID } = rawCommand;
+ this._log.debug("Processing command " + command, args);
+
+ this.service.recordTelemetryEvent(
+ "processcommand",
+ command,
+ undefined,
+ { flowID }
+ );
+
+ let engines = [args[0]];
+ switch (command) {
+ case "resetAll":
+ engines = null;
+ // Fallthrough
+ case "resetEngine":
+ await this.service.resetClient(engines);
+ break;
+ case "wipeEngine":
+ await this.service.wipeClient(engines);
+ break;
+ case "logout":
+ this.service.logout();
+ return false;
+ default:
+ this._log.warn("Received an unknown command: " + command);
+ break;
+ }
+ // Add the command to the "cleared" commands list
+ if (shouldRemoveCommand) {
+ await this.removeLocalCommand(rawCommand);
+ didRemoveCommand = true;
+ }
+ }
+ if (didRemoveCommand) {
+ await this._tracker.addChangedID(this.localID);
+ }
+
+ return true;
+ })();
+ },
+
+ /**
+ * Validates and sends a command to a client or all clients.
+ *
+ * Calling this does not actually sync the command data to the server. If the
+ * client already has the command/args pair, it won't receive a duplicate
+ * command.
+ * This method is async since it writes the command to a file.
+ *
+ * @param command
+ * Command to invoke on remote clients
+ * @param args
+ * Array of arguments to give to the command
+ * @param clientId
+ * Client ID to send command to. If undefined, send to all remote
+ * clients.
+   * @param telemetryExtra
+   *        Optional extra telemetry fields. May include a flowID, a unique
+   *        identifier used to track success for this operation across
+   *        devices; one is allocated if absent.
+ */
+ async sendCommand(command, args, clientId = null, telemetryExtra = {}) {
+ let commandData = this._commands[command];
+ // Don't send commands that we don't know about.
+ if (!commandData) {
+ this._log.error("Unknown command to send: " + command);
+ return;
+ } else if (!args || args.length != commandData.args) {
+ // Don't send a command with the wrong number of arguments.
+ this._log.error(
+ "Expected " +
+ commandData.args +
+ " args for '" +
+ command +
+ "', but got " +
+ args
+ );
+ return;
+ }
+
+ // We allocate a "flowID" here, so it is used for each client.
+ telemetryExtra = Object.assign({}, telemetryExtra); // don't clobber the caller's object
+ if (!telemetryExtra.flowID) {
+ telemetryExtra.flowID = Utils.makeGUID();
+ }
+
+ if (clientId) {
+ await this._sendCommandToClient(command, args, clientId, telemetryExtra);
+ } else {
+ for (let [id, record] of Object.entries(this._store._remoteClients)) {
+ if (!record.stale) {
+ await this._sendCommandToClient(command, args, id, telemetryExtra);
+ }
+ }
+ }
+ },
+
+ async _removeRemoteClient(id) {
+ delete this._store._remoteClients[id];
+ await this._tracker.removeChangedID(id);
+ await this._removeClientCommands(id);
+ this._modified.delete(id);
+ },
+};
+Object.setPrototypeOf(ClientEngine.prototype, SyncEngine.prototype);
+
+function ClientStore(name, engine) {
+ Store.call(this, name, engine);
+}
+ClientStore.prototype = {
+ _remoteClients: {},
+
+ async create(record) {
+ await this.update(record);
+ },
+
+ async update(record) {
+ if (record.id == this.engine.localID) {
+ // Only grab commands from the server; local name/type always wins
+ this.engine.localCommands = record.commands;
+ } else {
+ this._remoteClients[record.id] = record.cleartext;
+ }
+ },
+
+ async createRecord(id, collection) {
+ let record = new ClientsRec(collection, id);
+
+ const commandsChanges = this.engine._currentlySyncingCommands
+ ? this.engine._currentlySyncingCommands[id]
+ : [];
+
+ // Package the individual components into a record for the local client
+ if (id == this.engine.localID) {
+ try {
+ record.fxaDeviceId = await this.engine.fxAccounts.device.getLocalId();
+ } catch (error) {
+ this._log.warn("failed to get fxa device id", error);
+ }
+ record.name = this.engine.localName;
+ record.type = this.engine.localType;
+ record.version = Services.appinfo.version;
+ record.protocols = SUPPORTED_PROTOCOL_VERSIONS;
+
+      // Subtract the commands we recorded that we've already executed.
+ if (
+ commandsChanges &&
+ commandsChanges.length &&
+ this.engine.localCommands &&
+ this.engine.localCommands.length
+ ) {
+ record.commands = this.engine.localCommands.filter(
+ command => !hasDupeCommand(commandsChanges, command)
+ );
+ }
+
+ // Optional fields.
+ record.os = Services.appinfo.OS; // "Darwin"
+ record.appPackage = Services.appinfo.ID;
+ record.application = this.engine.brandName; // "Nightly"
+
+ // We can't compute these yet.
+ // record.device = ""; // Bug 1100723
+ // record.formfactor = ""; // Bug 1100722
+ } else {
+ record.cleartext = Object.assign({}, this._remoteClients[id]);
+ delete record.cleartext.serverLastModified; // serverLastModified is a local only attribute.
+
+ // Add the commands we have to send
+ if (commandsChanges && commandsChanges.length) {
+ const recordCommands = record.cleartext.commands || [];
+ const newCommands = commandsChanges.filter(
+ command => !hasDupeCommand(recordCommands, command)
+ );
+ record.cleartext.commands = recordCommands.concat(newCommands);
+ }
+
+ if (record.cleartext.stale) {
+ // It's almost certainly a logic error for us to upload a record we
+ // consider stale, so make log noise, but still remove the flag.
+ this._log.error(
+ `Preparing to upload record ${id} that we consider stale`
+ );
+ delete record.cleartext.stale;
+ }
+ }
+ if (record.commands) {
+ const maxPayloadSize =
+ this.engine.service.getMemcacheMaxRecordPayloadSize();
+ let origOrder = new Map(record.commands.map((c, i) => [c, i]));
+ // we sort first by priority, and second by age (indicated by order in the
+ // original list)
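+      // Illustrative example: given a hypothetical command with importance 1
+      // alongside the known commands above (all importance 0), the
+      // importance-1 entry sorts to the tail; among equal importance, older
+      // entries sort toward the tail, so they're the first to be truncated
+      // below.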
+ let commands = record.commands.slice().sort((a, b) => {
+ let infoA = this.engine._commands[a.command];
+ let infoB = this.engine._commands[b.command];
+ // Treat unknown command types as highest priority, to allow us to add
+ // high priority commands in the future without worrying about clients
+        // removing them from each other unnecessarily.
+ let importA = infoA ? infoA.importance : 0;
+ let importB = infoB ? infoB.importance : 0;
+        // Higher importance numbers indicate that we care less, so they
+ // go to the end of the list where they'll be popped off.
+ let importDelta = importA - importB;
+ if (importDelta != 0) {
+ return importDelta;
+ }
+ let origIdxA = origOrder.get(a);
+ let origIdxB = origOrder.get(b);
+ // Within equivalent priorities, we put older entries near the end
+ // of the list, so that they are removed first.
+ return origIdxB - origIdxA;
+ });
+ let truncatedCommands = Utils.tryFitItems(commands, maxPayloadSize);
+ if (truncatedCommands.length != record.commands.length) {
+ this._log.warn(
+ `Removing commands from client ${id} (from ${record.commands.length} to ${truncatedCommands.length})`
+ );
+ // Restore original order.
+ record.commands = truncatedCommands.sort(
+ (a, b) => origOrder.get(a) - origOrder.get(b)
+ );
+ }
+ }
+ return record;
+ },
+
+ async itemExists(id) {
+ return id in (await this.getAllIDs());
+ },
+
+ async getAllIDs() {
+ let ids = {};
+ ids[this.engine.localID] = true;
+ for (let id in this._remoteClients) {
+ ids[id] = true;
+ }
+ return ids;
+ },
+
+ async wipe() {
+ this._remoteClients = {};
+ },
+};
+Object.setPrototypeOf(ClientStore.prototype, Store.prototype);
+
+function ClientsTracker(name, engine) {
+ LegacyTracker.call(this, name, engine);
+}
+ClientsTracker.prototype = {
+ _enabled: false,
+
+ onStart() {
+ Svc.Obs.add("fxaccounts:new_device_id", this.asyncObserver);
+ Services.prefs.addObserver(
+ PREF_ACCOUNT_ROOT + "device.name",
+ this.asyncObserver
+ );
+ },
+ onStop() {
+ Services.prefs.removeObserver(
+ PREF_ACCOUNT_ROOT + "device.name",
+ this.asyncObserver
+ );
+ Svc.Obs.remove("fxaccounts:new_device_id", this.asyncObserver);
+ },
+
+ async observe(subject, topic, data) {
+ switch (topic) {
+ case "nsPref:changed":
+ this._log.debug("client.name preference changed");
+ // Fallthrough intended.
+ case "fxaccounts:new_device_id":
+ await this.addChangedID(this.engine.localID);
+ this.score += SINGLE_USER_THRESHOLD + 1; // ALWAYS SYNC NOW.
+ break;
+ }
+ },
+};
+Object.setPrototypeOf(ClientsTracker.prototype, LegacyTracker.prototype);
diff --git a/services/sync/modules/engines/extension-storage.sys.mjs b/services/sync/modules/engines/extension-storage.sys.mjs
new file mode 100644
index 0000000000..d2671978c8
--- /dev/null
+++ b/services/sync/modules/engines/extension-storage.sys.mjs
@@ -0,0 +1,308 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+import {
+ BridgedEngine,
+ BridgeWrapperXPCOM,
+ LogAdapter,
+} from "resource://services-sync/bridged_engine.sys.mjs";
+import { SyncEngine, Tracker } from "resource://services-sync/engines.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ MULTI_DEVICE_THRESHOLD: "resource://services-sync/constants.sys.mjs",
+ Observers: "resource://services-common/observers.sys.mjs",
+ SCORE_INCREMENT_MEDIUM: "resource://services-sync/constants.sys.mjs",
+ Svc: "resource://services-sync/util.sys.mjs",
+ extensionStorageSync: "resource://gre/modules/ExtensionStorageSync.sys.mjs",
+
+ extensionStorageSyncKinto:
+ "resource://gre/modules/ExtensionStorageSyncKinto.sys.mjs",
+});
+
+XPCOMUtils.defineLazyServiceGetter(
+ lazy,
+ "StorageSyncService",
+ "@mozilla.org/extensions/storage/sync;1",
+ "nsIInterfaceRequestor"
+);
+
+const PREF_FORCE_ENABLE = "engine.extension-storage.force";
+
+// A helper to indicate whether extension-storage is enabled - it's based on
+// the "addons" pref. The same logic is shared between both engine impls.
+function getEngineEnabled() {
+ // By default, we sync extension storage if we sync addons. This
+ // lets us simplify the UX since users probably don't consider
+ // "extension preferences" a separate category of syncing.
+ // However, we also respect engine.extension-storage.force, which
+ // can be set to true or false, if a power user wants to customize
+ // the behavior despite the lack of UI.
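+  // For example (assuming Svc.PrefBranch is rooted at "services.sync."), a
+  // power user could add this to user.js:
+  //   user_pref("services.sync.engine.extension-storage.force", true);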
+ if (
+ lazy.Svc.PrefBranch.getPrefType(PREF_FORCE_ENABLE) !=
+ Ci.nsIPrefBranch.PREF_INVALID
+ ) {
+ return lazy.Svc.PrefBranch.getBoolPref(PREF_FORCE_ENABLE);
+ }
+ return lazy.Svc.PrefBranch.getBoolPref("engine.addons", false);
+}
+
+function setEngineEnabled(enabled) {
+ // This will be called by the engine manager when declined on another device.
+ // Things will go a bit pear-shaped if the engine manager tries to end up
+ // with 'addons' and 'extension-storage' in different states - however, this
+ // *can* happen given we support the `engine.extension-storage.force`
+ // preference. So if that pref exists, we set it to this value. If that pref
+ // doesn't exist, we just ignore it and hope that the 'addons' engine is also
+ // going to be set to the same state.
+ if (
+ lazy.Svc.PrefBranch.getPrefType(PREF_FORCE_ENABLE) !=
+ Ci.nsIPrefBranch.PREF_INVALID
+ ) {
+ lazy.Svc.PrefBranch.setBoolPref(PREF_FORCE_ENABLE, enabled);
+ }
+}
+
+// A "bridged engine" to our webext-storage component.
+export function ExtensionStorageEngineBridge(service) {
+ this.component = lazy.StorageSyncService.getInterface(
+ Ci.mozIBridgedSyncEngine
+ );
+ BridgedEngine.call(this, "Extension-Storage", service);
+ this._bridge = new BridgeWrapperXPCOM(this.component);
+
+ let app_services_logger = Cc["@mozilla.org/appservices/logger;1"].getService(
+ Ci.mozIAppServicesLogger
+ );
+ let logger_target = "app-services:webext_storage:sync";
+ app_services_logger.register(logger_target, new LogAdapter(this._log));
+}
+
+ExtensionStorageEngineBridge.prototype = {
+ syncPriority: 10,
+
+  // Used to override the engine name in telemetry, so that we can distinguish
+  // this engine from the deprecated Kinto-based implementation.
+ overrideTelemetryName: "rust-webext-storage",
+
+ _notifyPendingChanges() {
+ return new Promise(resolve => {
+ this.component
+ .QueryInterface(Ci.mozISyncedExtensionStorageArea)
+ .fetchPendingSyncChanges({
+ QueryInterface: ChromeUtils.generateQI([
+ "mozIExtensionStorageListener",
+ "mozIExtensionStorageCallback",
+ ]),
+ onChanged: (extId, json) => {
+ try {
+ lazy.extensionStorageSync.notifyListeners(
+ extId,
+ JSON.parse(json)
+ );
+ } catch (ex) {
+ this._log.warn(
+ `Error notifying change listeners for ${extId}`,
+ ex
+ );
+ }
+ },
+ handleSuccess: resolve,
+ handleError: (code, message) => {
+ this._log.warn(
+ "Error fetching pending synced changes",
+ message,
+ code
+ );
+ resolve();
+ },
+ });
+ });
+ },
+
+ _takeMigrationInfo() {
+ return new Promise((resolve, reject) => {
+ this.component
+ .QueryInterface(Ci.mozIExtensionStorageArea)
+ .takeMigrationInfo({
+ QueryInterface: ChromeUtils.generateQI([
+ "mozIExtensionStorageCallback",
+ ]),
+ handleSuccess: result => {
+ resolve(result ? JSON.parse(result) : null);
+ },
+ handleError: (code, message) => {
+ this._log.warn("Error fetching migration info", message, code);
+ // `takeMigrationInfo` doesn't actually perform the migration,
+ // just reads (and clears) any data stored in the DB from the
+ // previous migration.
+ //
+ // Any errors here are very likely occurring a good while
+ // after the migration ran, so we just warn and pretend
+ // nothing was there.
+ resolve(null);
+ },
+ });
+ });
+ },
+
+ async _syncStartup() {
+ let result = await super._syncStartup();
+ let info = await this._takeMigrationInfo();
+ if (info) {
+ lazy.Observers.notify(
+ "weave:telemetry:migration",
+ info,
+ "webext-storage"
+ );
+ }
+ return result;
+ },
+
+ async _processIncoming() {
+ await super._processIncoming();
+ try {
+ await this._notifyPendingChanges();
+ } catch (ex) {
+ // Failing to notify `storage.onChanged` observers is bad, but shouldn't
+ // interrupt syncing.
+ this._log.warn("Error notifying about synced changes", ex);
+ }
+ },
+
+ get enabled() {
+ return getEngineEnabled();
+ },
+ set enabled(enabled) {
+ setEngineEnabled(enabled);
+ },
+};
+Object.setPrototypeOf(
+ ExtensionStorageEngineBridge.prototype,
+ BridgedEngine.prototype
+);
+
+/**
+ *****************************************************************************
+ *
+ * Deprecated support for Kinto
+ *
+ *****************************************************************************
+ */
+
+/**
+ * The Engine that manages syncing for the web extension "storage"
+ * API, and in particular ext.storage.sync.
+ *
+ * ext.storage.sync is implemented using Kinto, which has its own syncing
+ * mechanisms that we don't need to integrate into the Firefox Sync
+ * framework, so this engine is something of a stub.
+ */
+export function ExtensionStorageEngineKinto(service) {
+ SyncEngine.call(this, "Extension-Storage", service);
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_skipPercentageChance",
+ "services.sync.extension-storage.skipPercentageChance",
+ 0
+ );
+}
+
+ExtensionStorageEngineKinto.prototype = {
+ _trackerObj: ExtensionStorageTracker,
+ // we don't need these since we implement our own sync logic
+ _storeObj: undefined,
+ _recordObj: undefined,
+
+ syncPriority: 10,
+ allowSkippedRecord: false,
+
+ async _sync() {
+ return lazy.extensionStorageSyncKinto.syncAll();
+ },
+
+ get enabled() {
+ return getEngineEnabled();
+ },
+ // We only need the enabled setter for the edge-case where info/collections
+ // has `extension-storage` - which could happen if the pref to flip the new
+ // engine on was once set but no longer is.
+ set enabled(enabled) {
+ setEngineEnabled(enabled);
+ },
+
+ _wipeClient() {
+ return lazy.extensionStorageSyncKinto.clearAll();
+ },
+
+ shouldSkipSync(syncReason) {
+ if (syncReason == "user" || syncReason == "startup") {
+ this._log.info(
+ `Not skipping extension storage sync: reason == ${syncReason}`
+ );
+ // Always sync if a user clicks the button, or if we're starting up.
+ return false;
+ }
+ // Ensure this wouldn't cause a resync...
+ if (this._tracker.score >= lazy.MULTI_DEVICE_THRESHOLD) {
+ this._log.info(
+ "Not skipping extension storage sync: Would trigger resync anyway"
+ );
+ return false;
+ }
+
+ let probability = this._skipPercentageChance / 100.0;
+ // Math.random() returns a value in the interval [0, 1), so `>` is correct:
+ // if `probability` is 1 skip every time, and if it's 0, never skip.
+ let shouldSkip = probability > Math.random();
+
+    this._log.info(
+      `Deciding whether to skip extension-storage sync (chance ${probability}): ${shouldSkip}`
+    );
+ return shouldSkip;
+ },
+};
+Object.setPrototypeOf(
+ ExtensionStorageEngineKinto.prototype,
+ SyncEngine.prototype
+);
+
+function ExtensionStorageTracker(name, engine) {
+ Tracker.call(this, name, engine);
+ this._ignoreAll = false;
+}
+ExtensionStorageTracker.prototype = {
+ get ignoreAll() {
+ return this._ignoreAll;
+ },
+
+ set ignoreAll(value) {
+ this._ignoreAll = value;
+ },
+
+ onStart() {
+ lazy.Svc.Obs.add("ext.storage.sync-changed", this.asyncObserver);
+ },
+
+ onStop() {
+ lazy.Svc.Obs.remove("ext.storage.sync-changed", this.asyncObserver);
+ },
+
+ async observe(subject, topic, data) {
+ if (this.ignoreAll) {
+ return;
+ }
+
+ if (topic !== "ext.storage.sync-changed") {
+ return;
+ }
+
+ // Single adds, removes and changes are not so important on their
+ // own, so let's just increment score a bit.
+ this.score += lazy.SCORE_INCREMENT_MEDIUM;
+ },
+};
+Object.setPrototypeOf(ExtensionStorageTracker.prototype, Tracker.prototype);
diff --git a/services/sync/modules/engines/forms.sys.mjs b/services/sync/modules/engines/forms.sys.mjs
new file mode 100644
index 0000000000..3516327659
--- /dev/null
+++ b/services/sync/modules/engines/forms.sys.mjs
@@ -0,0 +1,298 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import {
+ Store,
+ SyncEngine,
+ LegacyTracker,
+} from "resource://services-sync/engines.sys.mjs";
+
+import { CryptoWrapper } from "resource://services-sync/record.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import { SCORE_INCREMENT_MEDIUM } from "resource://services-sync/constants.sys.mjs";
+import {
+ CollectionProblemData,
+ CollectionValidator,
+} from "resource://services-sync/collection_validator.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+const lazy = {};
+ChromeUtils.defineESModuleGetters(lazy, {
+ FormHistory: "resource://gre/modules/FormHistory.sys.mjs",
+});
+
+const FORMS_TTL = 3 * 365 * 24 * 60 * 60; // Three years in seconds.
+
+export function FormRec(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+
+FormRec.prototype = {
+ _logName: "Sync.Record.Form",
+ ttl: FORMS_TTL,
+};
+Object.setPrototypeOf(FormRec.prototype, CryptoWrapper.prototype);
+
+Utils.deferGetSet(FormRec, "cleartext", ["name", "value"]);
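+
+// A minimal sketch of what the deferGetSet call above provides: it defines
+// accessors on FormRec that proxy to the record's `cleartext` payload, so
+// the following two statements (illustrative only) are equivalent:
+//
+//   record.name = "email";
+//   record.cleartext.name = "email";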
+
+var FormWrapper = {
+ _log: Log.repository.getLogger("Sync.Engine.Forms"),
+
+ _getEntryCols: ["fieldname", "value"],
+ _guidCols: ["guid"],
+
+ _search(terms, searchData) {
+ return lazy.FormHistory.search(terms, searchData);
+ },
+
+ async _update(changes) {
+ if (!lazy.FormHistory.enabled) {
+ return; // update isn't going to do anything.
+ }
+ await lazy.FormHistory.update(changes).catch(console.error);
+ },
+
+ async getEntry(guid) {
+ let results = await this._search(this._getEntryCols, { guid });
+ if (!results.length) {
+ return null;
+ }
+ return { name: results[0].fieldname, value: results[0].value };
+ },
+
+ async getGUID(name, value) {
+ // Query for the provided entry.
+ let query = { fieldname: name, value };
+ let results = await this._search(this._guidCols, query);
+ return results.length ? results[0].guid : null;
+ },
+
+ async hasGUID(guid) {
+ // We could probably use a count function here, but search exists...
+ let results = await this._search(this._guidCols, { guid });
+ return !!results.length;
+ },
+
+ async replaceGUID(oldGUID, newGUID) {
+ let changes = {
+ op: "update",
+ guid: oldGUID,
+ newGuid: newGUID,
+ };
+ await this._update(changes);
+ },
+};
+
+export function FormEngine(service) {
+ SyncEngine.call(this, "Forms", service);
+}
+
+FormEngine.prototype = {
+ _storeObj: FormStore,
+ _trackerObj: FormTracker,
+ _recordObj: FormRec,
+
+ syncPriority: 6,
+
+ get prefName() {
+ return "history";
+ },
+
+ async _findDupe(item) {
+ return FormWrapper.getGUID(item.name, item.value);
+ },
+};
+Object.setPrototypeOf(FormEngine.prototype, SyncEngine.prototype);
+
+function FormStore(name, engine) {
+ Store.call(this, name, engine);
+}
+FormStore.prototype = {
+ async _processChange(change) {
+ // If this._changes is defined, then we are applying a batch, so we
+ // can defer it.
+ if (this._changes) {
+ this._changes.push(change);
+ return;
+ }
+
+ // Otherwise we must handle the change right now.
+ await FormWrapper._update(change);
+ },
+
+ async applyIncomingBatch(records, countTelemetry) {
+ Async.checkAppReady();
+ // We collect all the changes to be made then apply them all at once.
+ this._changes = [];
+ let failures = await Store.prototype.applyIncomingBatch.call(
+ this,
+ records,
+ countTelemetry
+ );
+ if (this._changes.length) {
+ await FormWrapper._update(this._changes);
+ }
+ delete this._changes;
+ return failures;
+ },
+
+ async getAllIDs() {
+ let results = await FormWrapper._search(["guid"], []);
+ let guids = {};
+ for (let result of results) {
+ guids[result.guid] = true;
+ }
+ return guids;
+ },
+
+ async changeItemID(oldID, newID) {
+ await FormWrapper.replaceGUID(oldID, newID);
+ },
+
+ async itemExists(id) {
+ return FormWrapper.hasGUID(id);
+ },
+
+ async createRecord(id, collection) {
+ let record = new FormRec(collection, id);
+ let entry = await FormWrapper.getEntry(id);
+ if (entry != null) {
+ record.name = entry.name;
+ record.value = entry.value;
+ } else {
+ record.deleted = true;
+ }
+ return record;
+ },
+
+ async create(record) {
+ this._log.trace("Adding form record for " + record.name);
+ let change = {
+ op: "add",
+ guid: record.id,
+ fieldname: record.name,
+ value: record.value,
+ };
+ await this._processChange(change);
+ },
+
+ async remove(record) {
+ this._log.trace("Removing form record: " + record.id);
+ let change = {
+ op: "remove",
+ guid: record.id,
+ };
+ await this._processChange(change);
+ },
+
+ async update(record) {
+ this._log.trace("Ignoring form record update request!");
+ },
+
+ async wipe() {
+ let change = {
+ op: "remove",
+ };
+ await FormWrapper._update(change);
+ },
+};
+Object.setPrototypeOf(FormStore.prototype, Store.prototype);
+
+function FormTracker(name, engine) {
+ LegacyTracker.call(this, name, engine);
+}
+FormTracker.prototype = {
+ QueryInterface: ChromeUtils.generateQI([
+ "nsIObserver",
+ "nsISupportsWeakReference",
+ ]),
+
+ onStart() {
+ Svc.Obs.add("satchel-storage-changed", this.asyncObserver);
+ },
+
+ onStop() {
+ Svc.Obs.remove("satchel-storage-changed", this.asyncObserver);
+ },
+
+ async observe(subject, topic, data) {
+ if (this.ignoreAll) {
+ return;
+ }
+ switch (topic) {
+ case "satchel-storage-changed":
+ if (data == "formhistory-add" || data == "formhistory-remove") {
+ let guid = subject.QueryInterface(Ci.nsISupportsString).toString();
+ await this.trackEntry(guid);
+ }
+ break;
+ }
+ },
+
+ async trackEntry(guid) {
+ const added = await this.addChangedID(guid);
+ if (added) {
+ this.score += SCORE_INCREMENT_MEDIUM;
+ }
+ },
+};
+Object.setPrototypeOf(FormTracker.prototype, LegacyTracker.prototype);
+
+class FormsProblemData extends CollectionProblemData {
+ getSummary() {
+ // We don't support syncing deleted form data, so "clientMissing" isn't a problem
+ return super.getSummary().filter(entry => entry.name !== "clientMissing");
+ }
+}
+
+export class FormValidator extends CollectionValidator {
+ constructor() {
+ super("forms", "id", ["name", "value"]);
+ this.ignoresMissingClients = true;
+ }
+
+ emptyProblemData() {
+ return new FormsProblemData();
+ }
+
+ async getClientItems() {
+ return FormWrapper._search(["guid", "fieldname", "value"], {});
+ }
+
+ normalizeClientItem(item) {
+ return {
+ id: item.guid,
+ guid: item.guid,
+ name: item.fieldname,
+ fieldname: item.fieldname,
+ value: item.value,
+ original: item,
+ };
+ }
+
+ async normalizeServerItem(item) {
+ let res = Object.assign(
+ {
+ guid: item.id,
+ fieldname: item.name,
+ original: item,
+ },
+ item
+ );
+ // Missing `name` or `value` causes the getGUID call to throw
+ if (item.name !== undefined && item.value !== undefined) {
+ let guid = await FormWrapper.getGUID(item.name, item.value);
+ if (guid) {
+ res.guid = guid;
+ res.id = guid;
+ res.duped = true;
+ }
+ }
+
+ return res;
+ }
+}
diff --git a/services/sync/modules/engines/history.sys.mjs b/services/sync/modules/engines/history.sys.mjs
new file mode 100644
index 0000000000..44014e4d9e
--- /dev/null
+++ b/services/sync/modules/engines/history.sys.mjs
@@ -0,0 +1,654 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const HISTORY_TTL = 5184000; // 60 days in seconds
+const THIRTY_DAYS_IN_MS = 2592000000; // 30 days in milliseconds
+// Sync may bring new fields from other clients, not yet understood by our engine.
+// Unknown fields outside these fields are aggregated into 'unknownFields' and
+// safely synced to prevent data loss.
+const VALID_HISTORY_FIELDS = ["id", "title", "histUri", "visits"];
+const VALID_VISIT_FIELDS = ["date", "type", "transition"];
+
+import { Async } from "resource://services-common/async.sys.mjs";
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+
+import {
+ MAX_HISTORY_DOWNLOAD,
+ MAX_HISTORY_UPLOAD,
+ SCORE_INCREMENT_SMALL,
+ SCORE_INCREMENT_XLARGE,
+} from "resource://services-sync/constants.sys.mjs";
+
+import {
+ Store,
+ SyncEngine,
+ LegacyTracker,
+} from "resource://services-sync/engines.sys.mjs";
+import { CryptoWrapper } from "resource://services-sync/record.sys.mjs";
+import { Utils } from "resource://services-sync/util.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ PlacesSyncUtils: "resource://gre/modules/PlacesSyncUtils.sys.mjs",
+ PlacesUtils: "resource://gre/modules/PlacesUtils.sys.mjs",
+});
+
+export function HistoryRec(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+
+HistoryRec.prototype = {
+ _logName: "Sync.Record.History",
+ ttl: HISTORY_TTL,
+};
+Object.setPrototypeOf(HistoryRec.prototype, CryptoWrapper.prototype);
+
+Utils.deferGetSet(HistoryRec, "cleartext", ["histUri", "title", "visits"]);
+
+export function HistoryEngine(service) {
+ SyncEngine.call(this, "History", service);
+}
+
+HistoryEngine.prototype = {
+ _recordObj: HistoryRec,
+ _storeObj: HistoryStore,
+ _trackerObj: HistoryTracker,
+ downloadLimit: MAX_HISTORY_DOWNLOAD,
+
+ syncPriority: 7,
+
+ async getSyncID() {
+ return lazy.PlacesSyncUtils.history.getSyncId();
+ },
+
+ async ensureCurrentSyncID(newSyncID) {
+ this._log.debug(
+ "Checking if server sync ID ${newSyncID} matches existing",
+ { newSyncID }
+ );
+ await lazy.PlacesSyncUtils.history.ensureCurrentSyncId(newSyncID);
+ return newSyncID;
+ },
+
+ async resetSyncID() {
+ // First, delete the collection on the server. It's fine if we're
+ // interrupted here: on the next sync, we'll detect that our old sync ID is
+ // now stale, and start over as a first sync.
+ await this._deleteServerCollection();
+ // Then, reset our local sync ID.
+ return this.resetLocalSyncID();
+ },
+
+ async resetLocalSyncID() {
+ let newSyncID = await lazy.PlacesSyncUtils.history.resetSyncId();
+ this._log.debug("Assigned new sync ID ${newSyncID}", { newSyncID });
+ return newSyncID;
+ },
+
+ async getLastSync() {
+ let lastSync = await lazy.PlacesSyncUtils.history.getLastSync();
+ return lastSync;
+ },
+
+ async setLastSync(lastSync) {
+ await lazy.PlacesSyncUtils.history.setLastSync(lastSync);
+ },
+
+ shouldSyncURL(url) {
+ return !url.startsWith("file:");
+ },
+
+ async pullNewChanges() {
+ const changedIDs = await this._tracker.getChangedIDs();
+ let modifiedGUIDs = Object.keys(changedIDs);
+ if (!modifiedGUIDs.length) {
+ return {};
+ }
+
+ let guidsToRemove =
+ await lazy.PlacesSyncUtils.history.determineNonSyncableGuids(
+ modifiedGUIDs
+ );
+ await this._tracker.removeChangedID(...guidsToRemove);
+ return changedIDs;
+ },
+
+ async _resetClient() {
+ await super._resetClient();
+ await lazy.PlacesSyncUtils.history.reset();
+ },
+};
+Object.setPrototypeOf(HistoryEngine.prototype, SyncEngine.prototype);
+
+function HistoryStore(name, engine) {
+ Store.call(this, name, engine);
+}
+
+HistoryStore.prototype = {
+  // We try to update at most this many visits at one time.
+ MAX_VISITS_PER_INSERT: 500,
+
+ // Some helper functions to handle GUIDs
+ async setGUID(uri, guid) {
+ if (!guid) {
+ guid = Utils.makeGUID();
+ }
+
+ try {
+ await lazy.PlacesSyncUtils.history.changeGuid(uri, guid);
+ } catch (e) {
+ this._log.error("Error setting GUID ${guid} for URI ${uri}", guid, uri);
+ }
+
+ return guid;
+ },
+
+ async GUIDForUri(uri, create) {
+ // Use the existing GUID if it exists
+ let guid;
+ try {
+ guid = await lazy.PlacesSyncUtils.history.fetchGuidForURL(uri);
+ } catch (e) {
+ this._log.error("Error fetching GUID for URL ${uri}", uri);
+ }
+
+ // If the URI has an existing GUID, return it.
+ if (guid) {
+ return guid;
+ }
+
+ // If the URI doesn't have a GUID and we were indicated to create one.
+ if (create) {
+ return this.setGUID(uri);
+ }
+
+ // If the URI doesn't have a GUID and we didn't create one for it.
+ return null;
+ },
+
+ async changeItemID(oldID, newID) {
+ let info = await lazy.PlacesSyncUtils.history.fetchURLInfoForGuid(oldID);
+ if (!info) {
+ throw new Error(`Can't change ID for nonexistent history entry ${oldID}`);
+ }
+    await this.setGUID(info.url, newID);
+ },
+
+ async getAllIDs() {
+ let urls = await lazy.PlacesSyncUtils.history.getAllURLs({
+ since: new Date(Date.now() - THIRTY_DAYS_IN_MS),
+ limit: MAX_HISTORY_UPLOAD,
+ });
+
+ let urlsByGUID = {};
+ for (let url of urls) {
+ if (!this.engine.shouldSyncURL(url)) {
+ continue;
+ }
+ let guid = await this.GUIDForUri(url, true);
+ urlsByGUID[guid] = url;
+ }
+ return urlsByGUID;
+ },
+
+ async applyIncomingBatch(records, countTelemetry) {
+ // Convert incoming records to mozIPlaceInfo objects which are applied as
+ // either history additions or removals.
+ let failed = [];
+ let toAdd = [];
+ let toRemove = [];
+ let pageGuidsWithUnknownFields = new Map();
+ let visitTimesWithUnknownFields = new Map();
+ await Async.yieldingForEach(records, async record => {
+ if (record.deleted) {
+ toRemove.push(record);
+ } else {
+ try {
+ let pageInfo = await this._recordToPlaceInfo(record);
+ if (pageInfo) {
+ toAdd.push(pageInfo);
+
+ // Pull any unknown fields that may have come from other clients
+ let unknownFields = lazy.PlacesSyncUtils.extractUnknownFields(
+ record.cleartext,
+ VALID_HISTORY_FIELDS
+ );
+ if (unknownFields) {
+ pageGuidsWithUnknownFields.set(pageInfo.guid, { unknownFields });
+ }
+
+ // Visits themselves could also contain unknown fields
+ for (const visit of pageInfo.visits) {
+ let unknownVisitFields =
+ lazy.PlacesSyncUtils.extractUnknownFields(
+ visit,
+ VALID_VISIT_FIELDS
+ );
+ if (unknownVisitFields) {
+ // Visits don't have an id at the time of sync so we'll need
+ // to use the time instead until it's inserted in the DB
+ visitTimesWithUnknownFields.set(visit.date.getTime(), {
+ unknownVisitFields,
+ });
+ }
+ }
+ }
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.error("Failed to create a place info", ex);
+ this._log.trace("The record that failed", record);
+ failed.push(record.id);
+ countTelemetry.addIncomingFailedReason(ex.message);
+ }
+ }
+ });
+ if (toAdd.length || toRemove.length) {
+ if (toRemove.length) {
+ // PlacesUtils.history.remove takes an array of visits to remove,
+ // but the error semantics are tricky - a single "bad" entry will cause
+ // an exception before anything is removed. So we do remove them one at
+ // a time.
+ await Async.yieldingForEach(toRemove, async record => {
+ try {
+ await this.remove(record);
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.error("Failed to delete a place info", ex);
+ this._log.trace("The record that failed", record);
+ failed.push(record.id);
+ countTelemetry.addIncomingFailedReason(ex.message);
+ }
+ });
+ }
+ for (let chunk of this._generateChunks(toAdd)) {
+ // Per bug 1415560, we ignore any exceptions returned by insertMany
+ // as they are likely to be spurious. We do supply an onError handler
+ // and log the exceptions seen there as they are likely to be
+ // informative, but we still never abort the sync based on them.
+ let unknownFieldsToInsert = [];
+ try {
+ await lazy.PlacesUtils.history.insertMany(
+ chunk,
+ result => {
+ const placeToUpdate = pageGuidsWithUnknownFields.get(result.guid);
+ // Extract the placeId from this result so we can add the unknownFields
+ // to the proper table
+ if (placeToUpdate) {
+ unknownFieldsToInsert.push({
+ placeId: result.placeId,
+ unknownFields: placeToUpdate.unknownFields,
+ });
+ }
+ // same for visits
+ result.visits.forEach(visit => {
+ let visitToUpdate = visitTimesWithUnknownFields.get(
+ visit.date.getTime()
+ );
+ if (visitToUpdate) {
+ unknownFieldsToInsert.push({
+ visitId: visit.visitId,
+ unknownFields: visitToUpdate.unknownVisitFields,
+ });
+ }
+ });
+ },
+ failedVisit => {
+ this._log.info(
+ "Failed to insert a history record",
+ failedVisit.guid
+ );
+ this._log.trace("The record that failed", failedVisit);
+ failed.push(failedVisit.guid);
+ }
+ );
+ } catch (ex) {
+ this._log.info("Failed to insert history records", ex);
+ countTelemetry.addIncomingFailedReason(ex.message);
+ }
+
+        // All the top-level places or visits that had unknown fields are
+        // sent to be added to the appropriate tables.
+ await lazy.PlacesSyncUtils.history.updateUnknownFieldsBatch(
+ unknownFieldsToInsert
+ );
+ }
+ }
+
+ return failed;
+ },
+
+ /**
+ * Returns a generator that splits records into sanely sized chunks suitable
+ * for passing to places to prevent places doing bad things at shutdown.
+ */
+ *_generateChunks(records) {
+ // We chunk based on the number of *visits* inside each record. However,
+ // we do not split a single record into multiple records, because at some
+ // time in the future, we intend to ensure these records are ordered by
+ // lastModified, and advance the engine's timestamp as we process them,
+ // meaning we can resume exactly where we left off next sync - although
+ // currently that's not done, so we will retry the entire batch next sync
+ // if interrupted.
+ // ie, this means that if a single record has more than MAX_VISITS_PER_INSERT
+ // visits, we will call insertMany() with exactly 1 record, but with
+ // more than MAX_VISITS_PER_INSERT visits.
+ let curIndex = 0;
+ this._log.debug(`adding ${records.length} records to history`);
+ while (curIndex < records.length) {
+ Async.checkAppReady(); // may throw if we are shutting down.
+ let toAdd = []; // what we are going to insert.
+ let count = 0; // a counter which tells us when toAdd is full.
+ do {
+ let record = records[curIndex];
+ curIndex += 1;
+ toAdd.push(record);
+ count += record.visits.length;
+ } while (
+ curIndex < records.length &&
+ count + records[curIndex].visits.length <= this.MAX_VISITS_PER_INSERT
+ );
+ this._log.trace(`adding ${toAdd.length} items in this chunk`);
+ yield toAdd;
+ }
+ },
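+
+  // A worked example of the chunking above, assuming MAX_VISITS_PER_INSERT
+  // is 500: records with visit counts [300, 300, 600] yield three chunks -
+  // [300], then [300] (since 300 + 300 exceeds 500), then [600] (a single
+  // record is never split, even when it alone exceeds the limit).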
+
+ /* An internal helper to determine if we can add an entry to places.
+ Exists primarily so tests can override it.
+ */
+ _canAddURI(uri) {
+ return lazy.PlacesUtils.history.canAddURI(uri);
+ },
+
+ /**
+ * Converts a Sync history record to a mozIPlaceInfo.
+ *
+ * Throws if an invalid record is encountered (invalid URI, etc.),
+ * returns a new PageInfo object if the record is to be applied, null
+ * otherwise (no visits to add, etc.),
+ */
+ async _recordToPlaceInfo(record) {
+ // Sort out invalid URIs and ones Places just simply doesn't want.
+ record.url = lazy.PlacesUtils.normalizeToURLOrGUID(record.histUri);
+ record.uri = CommonUtils.makeURI(record.histUri);
+
+ if (!Utils.checkGUID(record.id)) {
+ this._log.warn("Encountered record with invalid GUID: " + record.id);
+ return null;
+ }
+ record.guid = record.id;
+
+ if (
+ !this._canAddURI(record.uri) ||
+ !this.engine.shouldSyncURL(record.uri.spec)
+ ) {
+ this._log.trace(
+ "Ignoring record " +
+ record.id +
+ " with URI " +
+ record.uri.spec +
+ ": can't add this URI."
+ );
+ return null;
+ }
+
+ // We dupe visits by date and type. So an incoming visit that has
+ // the same timestamp and type as a local one won't get applied.
+ // To avoid creating new objects, we rewrite the query result so we
+ // can simply check for containment below.
+ let curVisitsAsArray = [];
+ let curVisits = new Set();
+ try {
+ curVisitsAsArray = await lazy.PlacesSyncUtils.history.fetchVisitsForURL(
+ record.histUri
+ );
+ } catch (e) {
+      this._log.error("Error while fetching visits for URL ${histUri}", {
+        histUri: record.histUri,
+      });
+ }
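+    // fetchVisitsForURL returns a bounded number of recent visits (20, by
+    // our reading of PlacesSyncUtils); if we got a full batch of 20 there
+    // may be older visits we don't know about, so the branch below clamps
+    // `oldestAllowed` to the oldest visit we actually fetched.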
+ let oldestAllowed =
+ lazy.PlacesSyncUtils.bookmarks.EARLIEST_BOOKMARK_TIMESTAMP;
+ if (curVisitsAsArray.length == 20) {
+ let oldestVisit = curVisitsAsArray[curVisitsAsArray.length - 1];
+ oldestAllowed = lazy.PlacesSyncUtils.history.clampVisitDate(
+ lazy.PlacesUtils.toDate(oldestVisit.date).getTime()
+ );
+ }
+
+ let i, k;
+ for (i = 0; i < curVisitsAsArray.length; i++) {
+ // Same logic as used in the loop below to generate visitKey.
+ let { date, type } = curVisitsAsArray[i];
+ let dateObj = lazy.PlacesUtils.toDate(date);
+ let millis = lazy.PlacesSyncUtils.history
+ .clampVisitDate(dateObj)
+ .getTime();
+ curVisits.add(`${millis},${type}`);
+ }
+
+ // Walk through the visits, make sure we have sound data, and eliminate
+ // dupes. The latter is done by rewriting the array in-place.
+ for (i = 0, k = 0; i < record.visits.length; i++) {
+ let visit = (record.visits[k] = record.visits[i]);
+
+ if (
+ !visit.date ||
+ typeof visit.date != "number" ||
+ !Number.isInteger(visit.date)
+ ) {
+ this._log.warn(
+ "Encountered record with invalid visit date: " + visit.date
+ );
+ continue;
+ }
+
+ if (
+ !visit.type ||
+ !Object.values(lazy.PlacesUtils.history.TRANSITIONS).includes(
+ visit.type
+ )
+ ) {
+ this._log.warn(
+ "Encountered record with invalid visit type: " +
+ visit.type +
+ "; ignoring."
+ );
+ continue;
+ }
+
+ // Dates need to be integers. Future and far past dates are clamped to the
+ // current date and earliest sensible date, respectively.
+ let originalVisitDate = lazy.PlacesUtils.toDate(Math.round(visit.date));
+ visit.date =
+ lazy.PlacesSyncUtils.history.clampVisitDate(originalVisitDate);
+
+ if (visit.date.getTime() < oldestAllowed) {
+ // Visit is older than the oldest visit we have, and we have so many
+ // visits for this uri that we hit our limit when inserting.
+ continue;
+ }
+ let visitKey = `${visit.date.getTime()},${visit.type}`;
+ if (curVisits.has(visitKey)) {
+ // Visit is a dupe, don't increment 'k' so the element will be
+ // overwritten.
+ continue;
+ }
+
+ // Note the visit key, so that we don't add duplicate visits with
+ // clamped timestamps.
+ curVisits.add(visitKey);
+
+ visit.transition = visit.type;
+ k += 1;
+ }
+ record.visits.length = k; // truncate array
+
+ // No update if there aren't any visits to apply.
+ // History wants at least one visit.
+ // In any case, the only thing we could change would be the title
+ // and that shouldn't change without a visit.
+ if (!record.visits.length) {
+ this._log.trace(
+ "Ignoring record " +
+ record.id +
+ " with URI " +
+ record.uri.spec +
+ ": no visits to add."
+ );
+ return null;
+ }
+
+ // PageInfo is validated using validateItemProperties which does a shallow
+ // copy of the properties. Since record uses getters some of the properties
+ // are not copied over. Thus we create and return a new object.
+ let pageInfo = {
+ title: record.title,
+ url: record.url,
+ guid: record.guid,
+ visits: record.visits,
+ };
+
+ return pageInfo;
+ },
+
+ async remove(record) {
+ this._log.trace("Removing page: " + record.id);
+ let removed = await lazy.PlacesUtils.history.remove(record.id);
+ if (removed) {
+ this._log.trace("Removed page: " + record.id);
+ } else {
+ this._log.debug("Page already removed: " + record.id);
+ }
+ },
+
+ async itemExists(id) {
+ return !!(await lazy.PlacesSyncUtils.history.fetchURLInfoForGuid(id));
+ },
+
+ async createRecord(id, collection) {
+    let info = await lazy.PlacesSyncUtils.history.fetchURLInfoForGuid(id);
+    let record = new HistoryRec(collection, id);
+    if (info) {
+      record.histUri = info.url;
+      record.title = info.title;
+      record.sortindex = info.frecency;
+
+      // If we had any unknown fields, ensure we put them back on the
+      // top-level record.
+      if (info.unknownFields) {
+        let unknownFields = JSON.parse(info.unknownFields);
+ Object.assign(record.cleartext, unknownFields);
+ }
+
+ try {
+ record.visits = await lazy.PlacesSyncUtils.history.fetchVisitsForURL(
+ record.histUri
+ );
+ } catch (e) {
+        this._log.error("Error while fetching visits for URL ${histUri}", {
+          histUri: record.histUri,
+        });
+ record.visits = [];
+ }
+ } else {
+ record.deleted = true;
+ }
+
+ return record;
+ },
+
+ async wipe() {
+ return lazy.PlacesSyncUtils.history.wipe();
+ },
+};
+Object.setPrototypeOf(HistoryStore.prototype, Store.prototype);
+
+function HistoryTracker(name, engine) {
+ LegacyTracker.call(this, name, engine);
+}
+HistoryTracker.prototype = {
+ onStart() {
+ this._log.info("Adding Places observer.");
+ this._placesObserver = new PlacesWeakCallbackWrapper(
+ this.handlePlacesEvents.bind(this)
+ );
+ PlacesObservers.addListener(
+ ["page-visited", "history-cleared", "page-removed"],
+ this._placesObserver
+ );
+ },
+
+ onStop() {
+ this._log.info("Removing Places observer.");
+ if (this._placesObserver) {
+ PlacesObservers.removeListener(
+ ["page-visited", "history-cleared", "page-removed"],
+ this._placesObserver
+ );
+ }
+ },
+
+ QueryInterface: ChromeUtils.generateQI(["nsISupportsWeakReference"]),
+
+ handlePlacesEvents(aEvents) {
+ this.asyncObserver.enqueueCall(() => this._handlePlacesEvents(aEvents));
+ },
+
+ async _handlePlacesEvents(aEvents) {
+ if (this.ignoreAll) {
+ this._log.trace(
+ "ignoreAll: ignoring visits [" +
+ aEvents.map(v => v.guid).join(",") +
+ "]"
+ );
+ return;
+ }
+ for (let event of aEvents) {
+ switch (event.type) {
+ case "page-visited": {
+ this._log.trace("'page-visited': " + event.url);
+ if (
+ this.engine.shouldSyncURL(event.url) &&
+ (await this.addChangedID(event.pageGuid))
+ ) {
+ this.score += SCORE_INCREMENT_SMALL;
+ }
+ break;
+ }
+ case "history-cleared": {
+ this._log.trace("history-cleared");
+ // Note that we're going to trigger a sync, but none of the cleared
+ // pages are tracked, so the deletions will not be propagated.
+ // See Bug 578694.
+ this.score += SCORE_INCREMENT_XLARGE;
+ break;
+ }
+ case "page-removed": {
+ if (event.reason === PlacesVisitRemoved.REASON_EXPIRED) {
+ return;
+ }
+
+ this._log.trace(
+ "page-removed: " + event.url + ", reason " + event.reason
+ );
+ const added = await this.addChangedID(event.pageGuid);
+ if (added) {
+ this.score += event.isRemovedFromStore
+ ? SCORE_INCREMENT_XLARGE
+ : SCORE_INCREMENT_SMALL;
+ }
+ break;
+ }
+ }
+ }
+ },
+};
+Object.setPrototypeOf(HistoryTracker.prototype, LegacyTracker.prototype);
diff --git a/services/sync/modules/engines/passwords.sys.mjs b/services/sync/modules/engines/passwords.sys.mjs
new file mode 100644
index 0000000000..8dea5664be
--- /dev/null
+++ b/services/sync/modules/engines/passwords.sys.mjs
@@ -0,0 +1,546 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { CryptoWrapper } from "resource://services-sync/record.sys.mjs";
+
+import { SCORE_INCREMENT_XLARGE } from "resource://services-sync/constants.sys.mjs";
+import { CollectionValidator } from "resource://services-sync/collection_validator.sys.mjs";
+import {
+ Changeset,
+ Store,
+ SyncEngine,
+ Tracker,
+} from "resource://services-sync/engines.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+// These are the fields the server could legitimately have for a logins
+// record. We mainly use this list to detect unknown fields, which we store
+// (but don't process) so we can round-trip them back to the server.
+const VALID_LOGIN_FIELDS = [
+ "id",
+ "displayOrigin",
+ "formSubmitURL",
+ "formActionOrigin",
+ "httpRealm",
+ "hostname",
+ "origin",
+ "password",
+ "passwordField",
+ "timeCreated",
+ "timeLastUsed",
+ "timePasswordChanged",
+ "timesUsed",
+ "username",
+ "usernameField",
+ "everSynced",
+ "syncCounter",
+ "unknownFields",
+];
+
+import { LoginManagerStorage } from "resource://passwordmgr/passwordstorage.sys.mjs";
+
+// Sync and many tests rely on times that are rounded to the nearest
+// 100th of a second; otherwise tests can fail intermittently.
+function roundTimeForSync(time) {
+ return Math.round(time / 10) / 100;
+}
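+
+// For illustration: roundTimeForSync(1712345678912) === 1712345678.91 -
+// milliseconds in, seconds (to two decimal places) out.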
+
+export function LoginRec(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+
+LoginRec.prototype = {
+ _logName: "Sync.Record.Login",
+
+ cleartextToString() {
+ let o = Object.assign({}, this.cleartext);
+ if (o.password) {
+ o.password = "X".repeat(o.password.length);
+ }
+ return JSON.stringify(o);
+ },
+};
+Object.setPrototypeOf(LoginRec.prototype, CryptoWrapper.prototype);
+
+Utils.deferGetSet(LoginRec, "cleartext", [
+ "hostname",
+ "formSubmitURL",
+ "httpRealm",
+ "username",
+ "password",
+ "usernameField",
+ "passwordField",
+ "timeCreated",
+ "timePasswordChanged",
+]);
+
+export function PasswordEngine(service) {
+ SyncEngine.call(this, "Passwords", service);
+}
+
+PasswordEngine.prototype = {
+ _storeObj: PasswordStore,
+ _trackerObj: PasswordTracker,
+ _recordObj: LoginRec,
+
+ syncPriority: 2,
+
+ emptyChangeset() {
+ return new PasswordsChangeset();
+ },
+
+ async ensureCurrentSyncID(newSyncID) {
+ return Services.logins.ensureCurrentSyncID(newSyncID);
+ },
+
+ async getLastSync() {
+ let legacyValue = await super.getLastSync();
+ if (legacyValue) {
+ await this.setLastSync(legacyValue);
+ Svc.PrefBranch.clearUserPref(this.name + ".lastSync");
+ this._log.debug(
+ `migrated timestamp of ${legacyValue} to the logins store`
+ );
+ return legacyValue;
+ }
+ return this._store.storage.getLastSync();
+ },
+
+ async setLastSync(timestamp) {
+ await this._store.storage.setLastSync(timestamp);
+ },
+
+ // Testing function to emulate that a login has been synced.
+ async markSynced(guid) {
+ this._store.storage.resetSyncCounter(guid, 0);
+ },
+
+ async pullAllChanges() {
+ return this._getChangedIDs(true);
+ },
+
+ async getChangedIDs() {
+ return this._getChangedIDs(false);
+ },
+
+ async _getChangedIDs(getAll) {
+ let changes = {};
+
+ let logins = await this._store.storage.getAllLogins(true);
+ for (let login of logins) {
+ if (getAll || login.syncCounter > 0) {
+ if (Utils.getSyncCredentialsHosts().has(login.origin)) {
+ continue;
+ }
+
+ changes[login.guid] = {
+ counter: login.syncCounter, // record the initial counter value
+ modified: roundTimeForSync(login.timePasswordChanged),
+ deleted: this._store.storage.loginIsDeleted(login.guid),
+ };
+ }
+ }
+
+ return changes;
+ },
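+
+  // The changeset produced above has, for example (illustrative GUID):
+  //
+  //   { "AAAAAAAAAAAA": { counter: 2, modified: 1712345678.91, deleted: false } }
+  //
+  // `counter` snapshots the syncCounter so trackRemainingChanges can later
+  // reset exactly the amount that was synced.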
+
+ async trackRemainingChanges() {
+ // Reset the syncCounter on the items that were changed.
+ for (let [guid, { counter, synced }] of Object.entries(
+ this._modified.changes
+ )) {
+ if (synced) {
+ this._store.storage.resetSyncCounter(guid, counter);
+ }
+ }
+ },
+
+ async _findDupe(item) {
+ let login = this._store._nsLoginInfoFromRecord(item);
+ if (!login) {
+ return null;
+ }
+
+ let logins = await this._store.storage.searchLoginsAsync({
+ origin: login.origin,
+ formActionOrigin: login.formActionOrigin,
+ httpRealm: login.httpRealm,
+ });
+
+ // Look for existing logins that match the origin, but ignore the password.
+ for (let local of logins) {
+ if (login.matches(local, true) && local instanceof Ci.nsILoginMetaInfo) {
+ return local.guid;
+ }
+ }
+
+ return null;
+ },
+
+ _deleteId(id) {
+ this._noteDeletedId(id);
+ },
+
+ getValidator() {
+ return new PasswordValidator();
+ },
+};
+Object.setPrototypeOf(PasswordEngine.prototype, SyncEngine.prototype);
+
+function PasswordStore(name, engine) {
+ Store.call(this, name, engine);
+ this._nsLoginInfo = new Components.Constructor(
+ "@mozilla.org/login-manager/loginInfo;1",
+ Ci.nsILoginInfo,
+ "init"
+ );
+ this.storage = LoginManagerStorage.create();
+}
+PasswordStore.prototype = {
+ _newPropertyBag() {
+ return Cc["@mozilla.org/hash-property-bag;1"].createInstance(
+ Ci.nsIWritablePropertyBag2
+ );
+ },
+
+  // Returns a stringified object of any fields not "known" by this client,
+  // mainly used to prevent data loss for other clients by roundtripping
+  // these fields without processing them.
+ _processUnknownFields(record) {
+ let unknownFields = {};
+ let keys = Object.keys(record);
+ keys
+ .filter(key => !VALID_LOGIN_FIELDS.includes(key))
+ .forEach(key => {
+ unknownFields[key] = record[key];
+ });
+    // If we found some unknown fields, we stringify them so they can be
+    // properly encrypted for roundtripping, since we can't know whether
+    // they contain sensitive data.
+ if (Object.keys(unknownFields).length) {
+ return JSON.stringify(unknownFields);
+ }
+ return null;
+ },
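+
+  // For illustration (hypothetical field): a record whose cleartext is
+  //   { hostname: "https://example.com", favoriteColor: "blue", ... }
+  // yields '{"favoriteColor":"blue"}', which is stored on the login and
+  // re-attached to the outgoing record in createRecord below.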
+
+ /**
+ * Return an instance of nsILoginInfo (and, implicitly, nsILoginMetaInfo).
+ */
+ _nsLoginInfoFromRecord(record) {
+ function nullUndefined(x) {
+ return x == undefined ? null : x;
+ }
+
+ function stringifyNullUndefined(x) {
+ return x == undefined || x == null ? "" : x;
+ }
+
+ if (record.formSubmitURL && record.httpRealm) {
+ this._log.warn(
+ "Record " +
+ record.id +
+ " has both formSubmitURL and httpRealm. Skipping."
+ );
+ return null;
+ }
+
+    // Passing in "undefined" results in an empty string, which later
+    // counts as a value. Explicitly normalize `undefined` to `null`
+    // (see the helpers above); empty strings and nulls are left as-is.
+ let info = new this._nsLoginInfo(
+ record.hostname,
+ nullUndefined(record.formSubmitURL),
+ nullUndefined(record.httpRealm),
+ stringifyNullUndefined(record.username),
+ record.password,
+ record.usernameField,
+ record.passwordField
+ );
+
+ info.QueryInterface(Ci.nsILoginMetaInfo);
+ info.guid = record.id;
+ if (record.timeCreated && !isNaN(new Date(record.timeCreated).getTime())) {
+ info.timeCreated = record.timeCreated;
+ }
+ if (
+ record.timePasswordChanged &&
+ !isNaN(new Date(record.timePasswordChanged).getTime())
+ ) {
+ info.timePasswordChanged = record.timePasswordChanged;
+ }
+
+    // Check the record for any unknown fields from other clients that we
+    // want to roundtrip during sync to prevent data loss.
+ let unknownFields = this._processUnknownFields(record.cleartext);
+ if (unknownFields) {
+ info.unknownFields = unknownFields;
+ }
+ return info;
+ },
+
+ async _getLoginFromGUID(guid) {
+ let logins = await this.storage.searchLoginsAsync({ guid }, true);
+ if (logins.length) {
+ this._log.trace(logins.length + " items matching " + guid + " found.");
+ return logins[0];
+ }
+
+ this._log.trace("No items matching " + guid + " found. Ignoring");
+ return null;
+ },
+
+ async applyIncoming(record) {
+ if (record.deleted) {
+ // Need to supply the sourceSync flag.
+ await this.remove(record, { sourceSync: true });
+ return;
+ }
+
+ await super.applyIncoming(record);
+ },
+
+ async getAllIDs() {
+ let items = {};
+ let logins = await this.storage.getAllLogins(true);
+
+ for (let i = 0; i < logins.length; i++) {
+ // Skip over Weave password/passphrase entries.
+ let metaInfo = logins[i].QueryInterface(Ci.nsILoginMetaInfo);
+ if (Utils.getSyncCredentialsHosts().has(metaInfo.origin)) {
+ continue;
+ }
+
+ items[metaInfo.guid] = metaInfo;
+ }
+
+ return items;
+ },
+
+ async changeItemID(oldID, newID) {
+ this._log.trace("Changing item ID: " + oldID + " to " + newID);
+
+ if (!(await this.itemExists(oldID))) {
+ this._log.trace("Can't change item ID: item doesn't exist");
+ return;
+ }
+ if (await this._getLoginFromGUID(newID)) {
+ this._log.trace("Can't change item ID: new ID already in use");
+ return;
+ }
+
+ let prop = this._newPropertyBag();
+ prop.setPropertyAsAUTF8String("guid", newID);
+
+ let oldLogin = await this._getLoginFromGUID(oldID);
+ this.storage.modifyLogin(oldLogin, prop, true);
+ },
+
+ async itemExists(id) {
+ let login = await this._getLoginFromGUID(id);
+ return login && !this.storage.loginIsDeleted(id);
+ },
+
+ async createRecord(id, collection) {
+ let record = new LoginRec(collection, id);
+ let login = await this._getLoginFromGUID(id);
+
+ if (!login || this.storage.loginIsDeleted(id)) {
+ record.deleted = true;
+ return record;
+ }
+
+ record.hostname = login.origin;
+ record.formSubmitURL = login.formActionOrigin;
+ record.httpRealm = login.httpRealm;
+ record.username = login.username;
+ record.password = login.password;
+ record.usernameField = login.usernameField;
+ record.passwordField = login.passwordField;
+
+ // Optional fields.
+ login.QueryInterface(Ci.nsILoginMetaInfo);
+ record.timeCreated = login.timeCreated;
+ record.timePasswordChanged = login.timePasswordChanged;
+
+    // Put the unknown fields back on the top-level record during upload.
+ if (login.unknownFields) {
+ let unknownFields = JSON.parse(login.unknownFields);
+ if (unknownFields) {
+ Object.keys(unknownFields).forEach(key => {
+ // We have to manually add it to the cleartext since that's
+ // what gets processed during upload
+ record.cleartext[key] = unknownFields[key];
+ });
+ }
+ }
+
+ return record;
+ },
+
+ async create(record) {
+ let login = this._nsLoginInfoFromRecord(record);
+ if (!login) {
+ return;
+ }
+
+ login.everSynced = true;
+
+ this._log.trace("Adding login for " + record.hostname);
+ this._log.trace(
+ "httpRealm: " +
+ JSON.stringify(login.httpRealm) +
+ "; " +
+ "formSubmitURL: " +
+ JSON.stringify(login.formActionOrigin)
+ );
+ await Services.logins.addLoginAsync(login);
+ },
+
+ async remove(record, { sourceSync = false } = {}) {
+ this._log.trace("Removing login " + record.id);
+
+ let loginItem = await this._getLoginFromGUID(record.id);
+ if (!loginItem) {
+ this._log.trace("Asked to remove record that doesn't exist, ignoring");
+ return;
+ }
+
+ this.storage.removeLogin(loginItem, sourceSync);
+ },
+
+ async update(record) {
+ let loginItem = await this._getLoginFromGUID(record.id);
+ if (!loginItem || this.storage.loginIsDeleted(record.id)) {
+ this._log.trace("Skipping update for unknown item: " + record.hostname);
+ return;
+ }
+
+ this._log.trace("Updating " + record.hostname);
+ let newinfo = this._nsLoginInfoFromRecord(record);
+ if (!newinfo) {
+ return;
+ }
+
+ loginItem.everSynced = true;
+
+ this.storage.modifyLogin(loginItem, newinfo, true);
+ },
+
+ async wipe() {
+ this.storage.removeAllUserFacingLogins(true);
+ },
+};
+Object.setPrototypeOf(PasswordStore.prototype, Store.prototype);
+
+function PasswordTracker(name, engine) {
+ Tracker.call(this, name, engine);
+}
+PasswordTracker.prototype = {
+ onStart() {
+ Svc.Obs.add("passwordmgr-storage-changed", this.asyncObserver);
+ },
+
+ onStop() {
+ Svc.Obs.remove("passwordmgr-storage-changed", this.asyncObserver);
+ },
+
+ async observe(subject, topic, data) {
+ if (this.ignoreAll) {
+ return;
+ }
+
+ switch (data) {
+ case "modifyLogin":
+ // The syncCounter should have been incremented only for
+        // those items that need to be synced.
+ if (
+ subject.QueryInterface(Ci.nsIArrayExtensions).GetElementAt(1)
+ .syncCounter > 0
+ ) {
+ this.score += SCORE_INCREMENT_XLARGE;
+ }
+ break;
+
+ case "addLogin":
+ case "removeLogin":
+ case "importLogins":
+ this.score += SCORE_INCREMENT_XLARGE;
+ break;
+
+ case "removeAllLogins":
+ this.score +=
+ SCORE_INCREMENT_XLARGE *
+ (subject.QueryInterface(Ci.nsIArrayExtensions).Count() + 1);
+ break;
+ }
+ },
+};
+Object.setPrototypeOf(PasswordTracker.prototype, Tracker.prototype);
+
+export class PasswordValidator extends CollectionValidator {
+ constructor() {
+ super("passwords", "id", [
+ "hostname",
+ "formSubmitURL",
+ "httpRealm",
+ "password",
+ "passwordField",
+ "username",
+ "usernameField",
+ ]);
+ }
+
+ async getClientItems() {
+ let logins = await Services.logins.getAllLogins();
+ let syncHosts = Utils.getSyncCredentialsHosts();
+ let result = logins
+ .map(l => l.QueryInterface(Ci.nsILoginMetaInfo))
+ .filter(l => !syncHosts.has(l.origin));
+ return Promise.resolve(result);
+ }
+
+ normalizeClientItem(item) {
+ return {
+ id: item.guid,
+ guid: item.guid,
+ hostname: item.hostname,
+ formSubmitURL: item.formSubmitURL,
+ httpRealm: item.httpRealm,
+ password: item.password,
+ passwordField: item.passwordField,
+ username: item.username,
+ usernameField: item.usernameField,
+ original: item,
+ };
+ }
+
+ async normalizeServerItem(item) {
+ return Object.assign({ guid: item.id }, item);
+ }
+}
+
+export class PasswordsChangeset extends Changeset {
+ getModifiedTimestamp(id) {
+ return this.changes[id].modified;
+ }
+
+ has(id) {
+ let change = this.changes[id];
+ if (change) {
+ return !change.synced;
+ }
+ return false;
+ }
+
+ delete(id) {
+ let change = this.changes[id];
+ if (change) {
+ // Mark the change as synced without removing it from the set.
+ // This allows the sync counter to be reset when sync is complete
+ // within trackRemainingChanges.
+ change.synced = true;
+ }
+ }
+}
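+
+// A sketch of the flow implied above: pullAllChanges/getChangedIDs snapshot
+// { counter, modified, deleted } per GUID; after each record uploads, the
+// engine calls delete(id), which only marks the change as synced; finally,
+// trackRemainingChanges resets each synced login's syncCounter by the
+// snapshotted amount.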
diff --git a/services/sync/modules/engines/prefs.sys.mjs b/services/sync/modules/engines/prefs.sys.mjs
new file mode 100644
index 0000000000..f29a9e7b59
--- /dev/null
+++ b/services/sync/modules/engines/prefs.sys.mjs
@@ -0,0 +1,503 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Prefs which start with this prefix are our "control" prefs - they indicate
+// which preferences should be synced.
+const PREF_SYNC_PREFS_PREFIX = "services.sync.prefs.sync.";
+
+// Prefs which have a default value are usually not synced - however, if the
+// preference exists under this prefix and the value is:
+// * `true`, then we do sync default values.
+// * `false`, then as soon as we ever sync a non-default value out, or sync
+// any value in, then we toggle the value to `true`.
+//
+// We never explicitly set this pref back to false, so it's one-shot.
+// Some preferences which are known to have a different default value on
+// different platforms have this preference with a default value of `false`,
+// so they don't sync until one device changes to the non-default value, then
+// that value forever syncs, even if it gets reset back to the default.
+// Note that preferences handled this way *must also* have the "normal"
+// control pref set.
+// A possible future enhancement would be to sync these prefs so that
+// other distributions can flag them if they change the default, but that
+// doesn't seem worthwhile until we can be confident they'd actually create
+// this special control pref at the same time they flip the default.
+const PREF_SYNC_SEEN_PREFIX = "services.sync.prefs.sync-seen.";
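+
+// For illustration (the first control pref ships with Firefox; the second
+// is hypothetical):
+//
+//   services.sync.prefs.sync.browser.startup.homepage = true
+//     -> "browser.startup.homepage" syncs normally.
+//   services.sync.prefs.sync-seen.some.pref = false
+//     -> "some.pref" starts syncing only once a device syncs a non-default
+//        value for it; from then on it always syncs.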
+
+import {
+ Store,
+ SyncEngine,
+ Tracker,
+} from "resource://services-sync/engines.sys.mjs";
+import { CryptoWrapper } from "resource://services-sync/record.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+import { SCORE_INCREMENT_XLARGE } from "resource://services-sync/constants.sys.mjs";
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineLazyGetter(lazy, "PREFS_GUID", () =>
+ CommonUtils.encodeBase64URL(Services.appinfo.ID)
+);
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ AddonManager: "resource://gre/modules/AddonManager.sys.mjs",
+});
+
+// In bug 1538015, we decided that it isn't always safe to allow all "incoming"
+// preferences to be applied locally. So we introduced another preference to control
+// this for backward compatibility. We removed that capability in bug 1854698, but in the
+// interests of working well between different versions of Firefox, we still forever
+// want to prevent this preference from syncing.
+// This was the name of the "control" pref.
+const PREF_SYNC_PREFS_ARBITRARY =
+ "services.sync.prefs.dangerously_allow_arbitrary";
+
+// Returns whether a pref is allowed to sync: it must not be
+// PREF_SYNC_PREFS_ARBITRARY, and it must have a local control pref.
+function isAllowedPrefName(prefName) {
+ if (prefName == PREF_SYNC_PREFS_ARBITRARY) {
+ return false; // never allow this.
+ }
+ // The pref must already have a control pref set, although it doesn't matter
+ // here whether that value is true or false. We can't use prefHasUserValue
+ // here because we also want to check prefs still with default values.
+ try {
+ Services.prefs.getBoolPref(PREF_SYNC_PREFS_PREFIX + prefName);
+ // pref exists!
+ return true;
+ } catch (_) {
+ return false;
+ }
+}
+
+export function PrefRec(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+
+PrefRec.prototype = {
+ _logName: "Sync.Record.Pref",
+};
+Object.setPrototypeOf(PrefRec.prototype, CryptoWrapper.prototype);
+
+Utils.deferGetSet(PrefRec, "cleartext", ["value"]);
+
+export function PrefsEngine(service) {
+ SyncEngine.call(this, "Prefs", service);
+}
+
+PrefsEngine.prototype = {
+ _storeObj: PrefStore,
+ _trackerObj: PrefTracker,
+ _recordObj: PrefRec,
+ version: 2,
+
+ syncPriority: 1,
+ allowSkippedRecord: false,
+
+ async getChangedIDs() {
+ // No need for a proper timestamp (no conflict resolution needed).
+ let changedIDs = {};
+ if (this._tracker.modified) {
+ changedIDs[lazy.PREFS_GUID] = 0;
+ }
+ return changedIDs;
+ },
+
+ async _wipeClient() {
+ await SyncEngine.prototype._wipeClient.call(this);
+ this.justWiped = true;
+ },
+
+ async _reconcile(item) {
+ // Apply the incoming item if we don't care about the local data
+ if (this.justWiped) {
+ this.justWiped = false;
+ return true;
+ }
+ return SyncEngine.prototype._reconcile.call(this, item);
+ },
+
+ async _uploadOutgoing() {
+ try {
+ await SyncEngine.prototype._uploadOutgoing.call(this);
+ } finally {
+ this._store._incomingPrefs = null;
+ }
+ },
+
+ async trackRemainingChanges() {
+ if (this._modified.count() > 0) {
+ this._tracker.modified = true;
+ }
+ },
+};
+Object.setPrototypeOf(PrefsEngine.prototype, SyncEngine.prototype);
+
+// We don't use services.sync.engine.tabs.filteredSchemes since it includes
+// about: pages and the like, which we want to be syncable in preferences.
+// Blob, moz-extension, data and file uris are never safe to sync,
+// so we limit our check to those.
+const UNSYNCABLE_URL_REGEXP = /^(moz-extension|blob|data|file):/i;
+function isUnsyncableURLPref(prefName) {
+ if (Services.prefs.getPrefType(prefName) != Ci.nsIPrefBranch.PREF_STRING) {
+ return false;
+ }
+ const prefValue = Services.prefs.getStringPref(prefName, "");
+ return UNSYNCABLE_URL_REGEXP.test(prefValue);
+}
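+
+// For illustration (hypothetical value): a string pref whose value is
+// "file:///home/user/start.html" matches UNSYNCABLE_URL_REGEXP above and is
+// treated as unsyncable regardless of its control pref.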
+
+function PrefStore(name, engine) {
+ Store.call(this, name, engine);
+ Svc.Obs.add(
+ "profile-before-change",
+ function () {
+ this.__prefs = null;
+ },
+ this
+ );
+}
+PrefStore.prototype = {
+ __prefs: null,
+ // used just for logging so we can work out why we chose to re-upload
+ _incomingPrefs: null,
+ get _prefs() {
+ if (!this.__prefs) {
+ this.__prefs = Services.prefs.getBranch("");
+ }
+ return this.__prefs;
+ },
+
+ _getSyncPrefs() {
+ let syncPrefs = Services.prefs
+ .getBranch(PREF_SYNC_PREFS_PREFIX)
+ .getChildList("")
+ .filter(pref => isAllowedPrefName(pref) && !isUnsyncableURLPref(pref));
+ // Also sync preferences that determine which prefs get synced.
+ let controlPrefs = syncPrefs.map(pref => PREF_SYNC_PREFS_PREFIX + pref);
+ return controlPrefs.concat(syncPrefs);
+ },
+
+ _isSynced(pref) {
+ if (pref.startsWith(PREF_SYNC_PREFS_PREFIX)) {
+ // this is an incoming control pref, which is ignored if there's not already
+ // a local control pref for the preference.
+ let controlledPref = pref.slice(PREF_SYNC_PREFS_PREFIX.length);
+ return isAllowedPrefName(controlledPref);
+ }
+
+ // This is the pref itself - it must be both allowed, and have a control
+ // pref which is true.
+ if (!this._prefs.getBoolPref(PREF_SYNC_PREFS_PREFIX + pref, false)) {
+ return false;
+ }
+ return isAllowedPrefName(pref);
+ },
+
+ // Given a preference name, returns either a string, bool, number or null.
+ _getPrefValue(pref) {
+ switch (this._prefs.getPrefType(pref)) {
+ case Ci.nsIPrefBranch.PREF_STRING:
+ return this._prefs.getStringPref(pref);
+ case Ci.nsIPrefBranch.PREF_INT:
+ return this._prefs.getIntPref(pref);
+ case Ci.nsIPrefBranch.PREF_BOOL:
+ return this._prefs.getBoolPref(pref);
+ // case Ci.nsIPrefBranch.PREF_INVALID: handled by the fallthrough
+ }
+ return null;
+ },
+
+ _getAllPrefs() {
+ let values = {};
+ for (let pref of this._getSyncPrefs()) {
+ // Note: _isSynced doesn't call isUnsyncableURLPref since it would cause
+ // us not to apply (syncable) changes to preferences that are set locally
+ // which have unsyncable urls.
+ if (this._isSynced(pref) && !isUnsyncableURLPref(pref)) {
+ let isSet = this._prefs.prefHasUserValue(pref);
+ // Missing and default prefs get the null value, unless that `seen`
+ // pref is set, in which case it always gets the value.
+ let forceValue = this._prefs.getBoolPref(
+ PREF_SYNC_SEEN_PREFIX + pref,
+ false
+ );
+ if (isSet || forceValue) {
+ values[pref] = this._getPrefValue(pref);
+ } else {
+ values[pref] = null;
+ }
+ // If incoming and outgoing don't match then either the user toggled a
+ // pref that doesn't match an incoming non-default value for that pref
+ // during a sync (unlikely!) or it refused to stick and is behaving oddly.
+ if (this._incomingPrefs) {
+ let inValue = this._incomingPrefs[pref];
+ let outValue = values[pref];
+ if (inValue != null && outValue != null && inValue != outValue) {
+ this._log.debug(`Incoming pref '${pref}' refused to stick?`);
+ this._log.trace(`Incoming: '${inValue}', outgoing: '${outValue}'`);
+ }
+ }
+ // If this is a special "sync-seen" pref, and it's not the default value,
+ // set the seen pref to true.
+ if (
+ isSet &&
+ this._prefs.getBoolPref(PREF_SYNC_SEEN_PREFIX + pref, false) === false
+ ) {
+ this._log.trace(`toggling sync-seen pref for '${pref}' to true`);
+ this._prefs.setBoolPref(PREF_SYNC_SEEN_PREFIX + pref, true);
+ }
+ }
+ }
+ return values;
+ },
+
+ _maybeLogPrefChange(pref, incomingValue, existingValue) {
+ if (incomingValue != existingValue) {
+ this._log.debug(`Adjusting preference "${pref}" to the incoming value`);
+ // values are PII, so must only be logged at trace.
+ this._log.trace(`Existing: ${existingValue}. Incoming: ${incomingValue}`);
+ }
+ },
+
+ _setAllPrefs(values) {
+ const selectedThemeIDPref = "extensions.activeThemeID";
+ let selectedThemeIDBefore = this._prefs.getStringPref(
+ selectedThemeIDPref,
+ null
+ );
+ let selectedThemeIDAfter = selectedThemeIDBefore;
+
+ // Update 'services.sync.prefs.sync.foo.pref' before 'foo.pref', otherwise
+ // _isSynced returns false when 'foo.pref' doesn't exist (e.g., on a new device).
+    let prefs = Object.keys(values).sort(
+      (a, b) =>
+        b.startsWith(PREF_SYNC_PREFS_PREFIX) -
+        a.startsWith(PREF_SYNC_PREFS_PREFIX)
+    );
+ for (let pref of prefs) {
+ let value = values[pref];
+ if (!this._isSynced(pref)) {
+ // It's unusual for us to find an incoming preference (ie, a pref some other
+ // instance thinks is syncable) which we don't think is syncable.
+ this._log.trace(`Ignoring incoming unsyncable preference "${pref}"`);
+ continue;
+ }
+
+ if (typeof value == "string" && UNSYNCABLE_URL_REGEXP.test(value)) {
+ this._log.trace(`Skipping incoming unsyncable url for pref: ${pref}`);
+ continue;
+ }
+
+ switch (pref) {
+ // Some special prefs we don't want to set directly.
+ case selectedThemeIDPref:
+ selectedThemeIDAfter = value;
+ break;
+
+ // default is to just set the pref
+ default:
+ if (value == null) {
+ // Pref has gone missing. The best we can do is reset it.
+ if (this._prefs.prefHasUserValue(pref)) {
+ this._log.debug(`Clearing existing local preference "${pref}"`);
+ this._log.trace(
+ `Existing local value for preference: ${this._getPrefValue(
+ pref
+ )}`
+ );
+ }
+ this._prefs.clearUserPref(pref);
+ } else {
+ try {
+ switch (typeof value) {
+ case "string":
+ this._maybeLogPrefChange(
+ pref,
+ value,
+ this._prefs.getStringPref(pref, undefined)
+ );
+ this._prefs.setStringPref(pref, value);
+ break;
+ case "number":
+ this._maybeLogPrefChange(
+ pref,
+ value,
+ this._prefs.getIntPref(pref, undefined)
+ );
+ this._prefs.setIntPref(pref, value);
+ break;
+ case "boolean":
+ this._maybeLogPrefChange(
+ pref,
+ value,
+ this._prefs.getBoolPref(pref, undefined)
+ );
+ this._prefs.setBoolPref(pref, value);
+ break;
+ }
+ } catch (ex) {
+ this._log.trace(`Failed to set pref: ${pref}`, ex);
+ }
+ }
+ // If there's a "sync-seen" pref for this it gets toggled to true
+ // regardless of the value.
+ let seenPref = PREF_SYNC_SEEN_PREFIX + pref;
+ if (
+ this._prefs.getPrefType(seenPref) != Ci.nsIPrefBranch.PREF_INVALID
+ ) {
+          this._prefs.setBoolPref(seenPref, true);
+ }
+ }
+ }
+ // Themes are a little messy. Themes which have been installed are handled
+ // by the addons engine - but default themes aren't seen by that engine.
+ // So if there's a new default theme ID and that ID corresponds to a
+ // system addon, then we arrange to enable that addon here.
+ if (selectedThemeIDBefore != selectedThemeIDAfter) {
+ this._maybeEnableBuiltinTheme(selectedThemeIDAfter).catch(e => {
+ this._log.error("Failed to maybe update the default theme", e);
+ });
+ }
+ },
+
+ async _maybeEnableBuiltinTheme(themeId) {
+ let addon = null;
+ try {
+ addon = await lazy.AddonManager.getAddonByID(themeId);
+ } catch (ex) {
+ this._log.trace(
+        `There's no addon with ID '${themeId}' - it can't be a builtin theme`
+ );
+ return;
+ }
+ if (addon && addon.isBuiltin && addon.type == "theme") {
+ this._log.trace(`Enabling builtin theme '${themeId}'`);
+ await addon.enable();
+ } else {
+ this._log.trace(
+ `Have incoming theme ID of '${themeId}' but it's not a builtin theme`
+ );
+ }
+ },
+
+ async getAllIDs() {
+ /* We store all prefs in just one WBO, with just one GUID */
+ let allprefs = {};
+ allprefs[lazy.PREFS_GUID] = true;
+ return allprefs;
+ },
+
+ async changeItemID(oldID, newID) {
+ this._log.trace("PrefStore GUID is constant!");
+ },
+
+ async itemExists(id) {
+ return id === lazy.PREFS_GUID;
+ },
+
+ async createRecord(id, collection) {
+ let record = new PrefRec(collection, id);
+
+ if (id == lazy.PREFS_GUID) {
+ record.value = this._getAllPrefs();
+ } else {
+ record.deleted = true;
+ }
+
+ return record;
+ },
+
+ async create(record) {
+ this._log.trace("Ignoring create request");
+ },
+
+ async remove(record) {
+ this._log.trace("Ignoring remove request");
+ },
+
+ async update(record) {
+ // Silently ignore pref updates that are for other apps.
+ if (record.id != lazy.PREFS_GUID) {
+ return;
+ }
+
+ this._log.trace("Received pref updates, applying...");
+ this._incomingPrefs = record.value;
+ this._setAllPrefs(record.value);
+ },
+
+ async wipe() {
+ this._log.trace("Ignoring wipe request");
+ },
+};
+Object.setPrototypeOf(PrefStore.prototype, Store.prototype);
+
+function PrefTracker(name, engine) {
+ Tracker.call(this, name, engine);
+ this._ignoreAll = false;
+ Svc.Obs.add("profile-before-change", this.asyncObserver);
+}
+PrefTracker.prototype = {
+ get ignoreAll() {
+ return this._ignoreAll;
+ },
+
+ set ignoreAll(value) {
+ this._ignoreAll = value;
+ },
+
+ get modified() {
+ return Svc.PrefBranch.getBoolPref("engine.prefs.modified", false);
+ },
+ set modified(value) {
+ Svc.PrefBranch.setBoolPref("engine.prefs.modified", value);
+ },
+
+ clearChangedIDs: function clearChangedIDs() {
+ this.modified = false;
+ },
+
+ __prefs: null,
+ get _prefs() {
+ if (!this.__prefs) {
+ this.__prefs = Services.prefs.getBranch("");
+ }
+ return this.__prefs;
+ },
+
+ onStart() {
+ Services.prefs.addObserver("", this.asyncObserver);
+ },
+
+ onStop() {
+ this.__prefs = null;
+ Services.prefs.removeObserver("", this.asyncObserver);
+ },
+
+ async observe(subject, topic, data) {
+ switch (topic) {
+ case "profile-before-change":
+ await this.stop();
+ break;
+ case "nsPref:changed":
+ if (this.ignoreAll) {
+ break;
+ }
+        // Trigger a sync for MULTI-DEVICE when either a control pref (one
+        // that determines which prefs are synced) or a synced pref changes.
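+        // For illustration (the actual PREF_SYNC_PREFS_PREFIX value is
+        // defined earlier in this file): flipping a control pref like
+        // `${PREF_SYNC_PREFS_PREFIX}homepage` matches the prefix test below,
+        // while editing a pref whose control pref is already true matches
+        // the getBoolPref test; either bumps the score.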
+ if (
+ data.indexOf(PREF_SYNC_PREFS_PREFIX) == 0 ||
+ this._prefs.getBoolPref(PREF_SYNC_PREFS_PREFIX + data, false)
+ ) {
+ this.score += SCORE_INCREMENT_XLARGE;
+ this.modified = true;
+ this._log.trace("Preference " + data + " changed");
+ }
+ break;
+ }
+ },
+};
+Object.setPrototypeOf(PrefTracker.prototype, Tracker.prototype);
+
+export function getPrefsGUIDForTest() {
+ return lazy.PREFS_GUID;
+}
diff --git a/services/sync/modules/engines/tabs.sys.mjs b/services/sync/modules/engines/tabs.sys.mjs
new file mode 100644
index 0000000000..861e051d1a
--- /dev/null
+++ b/services/sync/modules/engines/tabs.sys.mjs
@@ -0,0 +1,625 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const STORAGE_VERSION = 1; // This needs to be kept in sync with the Rust storage version
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+import { SyncEngine, Tracker } from "resource://services-sync/engines.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+import {
+ SCORE_INCREMENT_SMALL,
+ STATUS_OK,
+ URI_LENGTH_MAX,
+} from "resource://services-sync/constants.sys.mjs";
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+import { Async } from "resource://services-common/async.sys.mjs";
+import {
+ SyncRecord,
+ SyncTelemetry,
+} from "resource://services-sync/telemetry.sys.mjs";
+import { BridgedEngine } from "resource://services-sync/bridged_engine.sys.mjs";
+
+const FAR_FUTURE = 4102405200000; // 2100/01/01
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ PlacesUtils: "resource://gre/modules/PlacesUtils.sys.mjs",
+ PrivateBrowsingUtils: "resource://gre/modules/PrivateBrowsingUtils.sys.mjs",
+ ReaderMode: "resource://gre/modules/ReaderMode.sys.mjs",
+ TabsStore: "resource://gre/modules/RustTabs.sys.mjs",
+ RemoteTabRecord: "resource://gre/modules/RustTabs.sys.mjs",
+});
+
+XPCOMUtils.defineLazyPreferenceGetter(
+ lazy,
+ "TABS_FILTERED_SCHEMES",
+ "services.sync.engine.tabs.filteredSchemes",
+ "",
+ null,
+ val => {
+ return new Set(val.split("|"));
+ }
+);
+
+XPCOMUtils.defineLazyPreferenceGetter(
+ lazy,
+ "SYNC_AFTER_DELAY_MS",
+ "services.sync.syncedTabs.syncDelayAfterTabChange",
+ 0
+);
+
+// A "bridged engine" to our tabs component.
+export function TabEngine(service) {
+ BridgedEngine.call(this, "Tabs", service);
+}
+
+TabEngine.prototype = {
+ _trackerObj: TabTracker,
+ syncPriority: 3,
+
+ async prepareTheBridge(isQuickWrite) {
+ let clientsEngine = this.service.clientsEngine;
+ // Tell the bridged engine about clients.
+ // This is the same shape as ClientData in app-services.
+ // schema: https://github.com/mozilla/application-services/blob/a1168751231ed4e88c44d85f6dccc09c3b412bd2/components/sync15/src/client_types.rs#L14
+ let clientData = {
+ local_client_id: clientsEngine.localID,
+ recent_clients: {},
+ };
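+    // Illustrative shape once populated (IDs and names are hypothetical):
+    //   {
+    //     "local_client_id": "localGUID",
+    //     "recent_clients": {
+    //       "otherGUID": { "fxa_device_id": "fxaId",
+    //                      "device_name": "My Phone",
+    //                      "device_type": "mobile" }
+    //     }
+    //   }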
+
+ // We shouldn't upload tabs past what the server will accept
+ let tabs = await this.getTabsWithinPayloadSize();
+ await this._rustStore.setLocalTabs(
+ tabs.map(tab => {
+        // Rust wants lastUsed in ms but the provider gives it in seconds.
+ tab.lastUsed = tab.lastUsed * 1000;
+ return new lazy.RemoteTabRecord(tab);
+ })
+ );
+
+ for (let remoteClient of clientsEngine.remoteClients) {
+ let id = remoteClient.id;
+ if (!id) {
+ throw new Error("Remote client somehow did not have an id");
+ }
+ let client = {
+ fxa_device_id: remoteClient.fxaDeviceId,
+ // device_name and device_type are soft-deprecated - every client
+ // prefers what's in the FxA record. But fill them correctly anyway.
+ device_name: clientsEngine.getClientName(id) ?? "",
+ device_type: clientsEngine.getClientType(id),
+ };
+ clientData.recent_clients[id] = client;
+ }
+
+    // Put ourselves in there too so we record the correct device info in our sync record.
+ clientData.recent_clients[clientsEngine.localID] = {
+ fxa_device_id: await clientsEngine.fxAccounts.device.getLocalId(),
+ device_name: clientsEngine.localName,
+ device_type: clientsEngine.localType,
+ };
+
+    // Quick write needs to adjust the lastSync so we can POST to the server;
+    // see quickWrite() for details.
+ if (isQuickWrite) {
+ await this.setLastSync(FAR_FUTURE);
+ await this._bridge.prepareForSync(JSON.stringify(clientData));
+ return;
+ }
+
+    // Just in case we crashed while the lastSync timestamp was FAR_FUTURE, we
+    // reset it to zero.
+ if ((await this.getLastSync()) === FAR_FUTURE) {
+ await this._bridge.setLastSync(0);
+ }
+ await this._bridge.prepareForSync(JSON.stringify(clientData));
+ },
+
+ async _syncStartup() {
+ await super._syncStartup();
+ await this.prepareTheBridge();
+ },
+
+ async initialize() {
+ await SyncEngine.prototype.initialize.call(this);
+
+ let path = PathUtils.join(PathUtils.profileDir, "synced-tabs.db");
+ this._rustStore = await lazy.TabsStore.init(path);
+ this._bridge = await this._rustStore.bridgedEngine();
+
+    // Uniffi currently only supports async methods, so we'll need to hardcode
+    // these values for now (which is fine, as they hardly ever change).
+ this._bridge.storageVersion = STORAGE_VERSION;
+ this._bridge.allowSkippedRecord = true;
+
+ this._log.info("Got a bridged engine!");
+ this._tracker.modified = true;
+ },
+
+ async getChangedIDs() {
+ // No need for a proper timestamp (no conflict resolution needed).
+ let changedIDs = {};
+ if (this._tracker.modified) {
+ changedIDs[this.service.clientsEngine.localID] = 0;
+ }
+ return changedIDs;
+ },
+
+ // API for use by Sync UI code to give user choices of tabs to open.
+ async getAllClients() {
+ let remoteTabs = await this._rustStore.getAll();
+ let remoteClientTabs = [];
+ for (let remoteClient of this.service.clientsEngine.remoteClients) {
+      // We get some client info from the Rust tabs engine and some from
+      // the clients engine.
+ let rustClient = remoteTabs.find(
+ x => x.clientId === remoteClient.fxaDeviceId
+ );
+ if (!rustClient) {
+ continue;
+ }
+ let client = {
+ // rust gives us ms but js uses seconds, so fix them up.
+ tabs: rustClient.remoteTabs.map(tab => {
+ tab.lastUsed = tab.lastUsed / 1000;
+ return tab;
+ }),
+ lastModified: rustClient.lastModified / 1000,
+ ...remoteClient,
+ };
+ remoteClientTabs.push(client);
+ }
+ return remoteClientTabs;
+ },
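+  // A sketch of how consuming UI code might use getAllClients(); the
+  // showRemoteTab helper here is hypothetical:
+  //   for (let client of await engine.getAllClients()) {
+  //     for (let tab of client.tabs) {
+  //       // urlHistory[0] is the current URL; lastUsed is in seconds.
+  //       showRemoteTab(client.id, tab.urlHistory[0], tab.lastUsed);
+  //     }
+  //   }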
+
+ async removeClientData() {
+ let url = this.engineURL + "/" + this.service.clientsEngine.localID;
+ await this.service.resource(url).delete();
+ },
+
+ async trackRemainingChanges() {
+ if (this._modified.count() > 0) {
+ this._tracker.modified = true;
+ }
+ },
+
+ async getTabsWithinPayloadSize() {
+ const maxPayloadSize = this.service.getMaxRecordPayloadSize();
+ // See bug 535326 comment 8 for an explanation of the estimation
+ const maxSerializedSize = (maxPayloadSize / 4) * 3 - 1500;
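+    // Rough arithmetic behind the estimate (see the bug above): base64
+    // encoding inflates the payload by 4/3, so only ~3/4 of the maximum is
+    // usable, less ~1500 bytes reserved for the record envelope. E.g. a
+    // 256 KiB (262144 byte) max payload leaves roughly
+    // (262144 / 4) * 3 - 1500 = 195108 bytes for serialized tab data.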
+ return TabProvider.getAllTabsWithEstimatedMax(true, maxSerializedSize);
+ },
+
+ // Support for "quick writes"
+ _engineLock: Utils.lock,
+ _engineLocked: false,
+
+ // Tabs has a special lock to help support its "quick write"
+ get locked() {
+ return this._engineLocked;
+ },
+ lock() {
+ if (this._engineLocked) {
+ return false;
+ }
+ this._engineLocked = true;
+ return true;
+ },
+ unlock() {
+ this._engineLocked = false;
+ },
+
+ // Quickly do a POST of our current tabs if possible.
+ // This does things that would be dangerous for other engines - eg, posting
+ // without checking what's on the server could cause data-loss for other
+ // engines, but because each device exclusively owns exactly 1 tabs record
+ // with a known ID, it's safe here.
+ // Returns true if we successfully synced, false otherwise (either on error
+ // or because we declined to sync for any reason.) The return value is
+ // primarily for tests.
+ async quickWrite() {
+ if (!this.enabled) {
+ // this should be very rare, and only if tabs are disabled after the
+ // timer is created.
+ this._log.info("Can't do a quick-sync as tabs is disabled");
+ return false;
+ }
+    // This quick-sync doesn't drive the login state correctly, so just
+    // decline to sync if our status is bad.
+ if (this.service.status.checkSetup() != STATUS_OK) {
+ this._log.info(
+ "Can't do a quick-sync due to the service status",
+ this.service.status.toString()
+ );
+ return false;
+ }
+ if (!this.service.serverConfiguration) {
+ this._log.info("Can't do a quick sync before the first full sync");
+ return false;
+ }
+ try {
+ return await this._engineLock("tabs.js: quickWrite", async () => {
+ // We want to restore the lastSync timestamp when complete so next sync
+ // takes tabs written by other devices since our last real sync.
+ // And for this POST we don't want the protections offered by
+ // X-If-Unmodified-Since - we want the POST to work even if the remote
+ // has moved on and we will catch back up next full sync.
+ const origLastSync = await this.getLastSync();
+ try {
+ return this._doQuickWrite();
+ } finally {
+          // Set the lastSync to its original value for the next regular sync.
+ await this.setLastSync(origLastSync);
+ }
+ })();
+ } catch (ex) {
+ if (!Utils.isLockException(ex)) {
+ throw ex;
+ }
+ this._log.info(
+ "Can't do a quick-write as another tab sync is in progress"
+ );
+ return false;
+ }
+ },
+
+ // The guts of the quick-write sync, after we've taken the lock, checked
+ // the service status etc.
+ async _doQuickWrite() {
+ // We need to track telemetry for these syncs too!
+ const name = "tabs";
+ let telemetryRecord = new SyncRecord(
+ SyncTelemetry.allowedEngines,
+ "quick-write"
+ );
+ telemetryRecord.onEngineStart(name);
+ try {
+ Async.checkAppReady();
+ // We need to prep the bridge before we try to POST since it grabs
+ // the most recent local client id and properly sets a lastSync
+ // which is needed for a proper POST request
+ await this.prepareTheBridge(true);
+ this._tracker.clearChangedIDs();
+ this._tracker.resetScore();
+
+ Async.checkAppReady();
+ // now just the "upload" part of a sync,
+ // which for a rust engine is not obvious.
+ // We need to do is ask the rust engine for the changes. Although
+ // this is kinda abusing the bridged-engine interface, we know the tabs
+ // implementation of it works ok
+ let outgoing = await this._bridge.apply();
+ // We know we always have exactly 1 record.
+ let mine = outgoing[0];
+ this._log.trace("outgoing bso", mine);
+ // `this._recordObj` is a `BridgedRecord`, which isn't exported.
+ let record = this._recordObj.fromOutgoingBso(this.name, JSON.parse(mine));
+ let changeset = {};
+ changeset[record.id] = { synced: false, record };
+ this._modified.replace(changeset);
+
+ Async.checkAppReady();
+ await this._uploadOutgoing();
+ telemetryRecord.onEngineStop(name, null);
+ return true;
+ } catch (ex) {
+ this._log.warn("quicksync sync failed", ex);
+ telemetryRecord.onEngineStop(name, ex);
+ return false;
+ } finally {
+ // The top-level sync is never considered to fail here, just the engine
+ telemetryRecord.finished(null);
+ SyncTelemetry.takeTelemetryRecord(telemetryRecord);
+ }
+ },
+
+ async _sync() {
+ try {
+ await this._engineLock("tabs.js: fullSync", async () => {
+ await super._sync();
+ })();
+ } catch (ex) {
+ if (!Utils.isLockException(ex)) {
+ throw ex;
+ }
+ this._log.info(
+ "Can't do full tabs sync as a quick-write is currently running"
+ );
+ }
+ },
+};
+Object.setPrototypeOf(TabEngine.prototype, BridgedEngine.prototype);
+
+export const TabProvider = {
+ getWindowEnumerator() {
+ return Services.wm.getEnumerator("navigator:browser");
+ },
+
+ shouldSkipWindow(win) {
+ return win.closed || lazy.PrivateBrowsingUtils.isWindowPrivate(win);
+ },
+
+ getAllBrowserTabs() {
+ let tabs = [];
+ for (let win of this.getWindowEnumerator()) {
+ if (this.shouldSkipWindow(win)) {
+ continue;
+ }
+ // Get all the tabs from the browser
+ for (let tab of win.gBrowser.tabs) {
+ tabs.push(tab);
+ }
+ }
+
+ return tabs.sort(function (a, b) {
+ return b.lastAccessed - a.lastAccessed;
+ });
+ },
+
+  // This function creates tab records up to a specified number of bytes.
+  // It is an "estimation" because we don't accurately calculate the favicon
+  // and JSON overhead, using a rough estimate instead (for optimization purposes).
+ async getAllTabsWithEstimatedMax(filter, bytesMax) {
+ let log = Log.repository.getLogger(`Sync.Engine.Tabs.Provider`);
+ let tabRecords = [];
+ let iconPromises = [];
+ let runningByteLength = 0;
+ let encoder = new TextEncoder();
+
+ // Fetch all the tabs the user has open
+ let winTabs = this.getAllBrowserTabs();
+
+ for (let tab of winTabs) {
+ // We don't want to process any more tabs than we can sync
+ if (runningByteLength >= bytesMax) {
+ log.warn(
+ `Can't fit all tabs in sync payload: have ${winTabs.length},
+ but can only fit ${tabRecords.length}.`
+ );
+ break;
+ }
+
+ // Note that we used to sync "tab history" (ie, the "back button") state,
+ // but in practice this hasn't been used - only the current URI is of
+ // interest to clients.
+ // We stopped recording this in bug 1783991.
+ if (!tab?.linkedBrowser) {
+ continue;
+ }
+ let acceptable = !filter
+ ? url => url
+ : url =>
+ url &&
+ !lazy.TABS_FILTERED_SCHEMES.has(Services.io.extractScheme(url));
+
+ let url = tab.linkedBrowser.currentURI?.spec;
+ // Special case for reader mode.
+ if (url && url.startsWith("about:reader?")) {
+ url = lazy.ReaderMode.getOriginalUrl(url);
+ }
+ // We ignore the tab completely if the current entry url is
+ // not acceptable (we need something accurate to open).
+ if (!acceptable(url)) {
+ continue;
+ }
+
+ if (url.length > URI_LENGTH_MAX) {
+ log.trace("Skipping over-long URL.");
+ continue;
+ }
+
+ let thisTab = new lazy.RemoteTabRecord({
+ title: tab.linkedBrowser.contentTitle || "",
+ urlHistory: [url],
+ icon: "",
+ lastUsed: Math.floor((tab.lastAccessed || 0) / 1000),
+ });
+ tabRecords.push(thisTab);
+
+      // We don't want to wait for each favicon to resolve to get the bytes,
+      // so we estimate a conservative 100 chars for the favicon and JSON
+      // overhead. Rust will further optimize and trim if we happen to be
+      // wildly off.
+ runningByteLength +=
+ encoder.encode(thisTab.title + thisTab.lastUsed + url).byteLength + 100;
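+      // E.g. a 7-char title, a 10-digit lastUsed and a 40-char URL count as
+      // roughly 7 + 10 + 40 + 100 = 157 bytes against the budget (multi-byte
+      // UTF-8 titles weigh proportionally more).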
+
+ // Use the favicon service for the icon url - we can wait for the promises at the end.
+ let iconPromise = lazy.PlacesUtils.promiseFaviconData(url)
+ .then(iconData => {
+ thisTab.icon = iconData.uri.spec;
+ })
+        .catch(ex => {
+          // Log the failure; the tab is still synced, just without an icon.
+          log.trace(`Failed to fetch favicon for ${url}`, ex);
+        });
+ iconPromises.push(iconPromise);
+ }
+
+ await Promise.allSettled(iconPromises);
+ return tabRecords;
+ },
+};
+
+function TabTracker(name, engine) {
+ Tracker.call(this, name, engine);
+
+ // Make sure "this" pointer is always set correctly for event listeners.
+ this.onTab = Utils.bind2(this, this.onTab);
+ this._unregisterListeners = Utils.bind2(this, this._unregisterListeners);
+}
+TabTracker.prototype = {
+ QueryInterface: ChromeUtils.generateQI(["nsIObserver"]),
+
+ clearChangedIDs() {
+ this.modified = false;
+ },
+
+ // We do not track TabSelect because that almost always triggers
+ // the web progress listeners (onLocationChange), which we already track
+ _topics: ["TabOpen", "TabClose"],
+
+ _registerListenersForWindow(window) {
+ this._log.trace("Registering tab listeners in window");
+ for (let topic of this._topics) {
+ window.addEventListener(topic, this.onTab);
+ }
+ window.addEventListener("unload", this._unregisterListeners);
+ // If it's got a tab browser we can listen for things like navigation.
+ if (window.gBrowser) {
+ window.gBrowser.addProgressListener(this);
+ }
+ },
+
+ _unregisterListeners(event) {
+ this._unregisterListenersForWindow(event.target);
+ },
+
+ _unregisterListenersForWindow(window) {
+ this._log.trace("Removing tab listeners in window");
+ window.removeEventListener("unload", this._unregisterListeners);
+ for (let topic of this._topics) {
+ window.removeEventListener(topic, this.onTab);
+ }
+ if (window.gBrowser) {
+ window.gBrowser.removeProgressListener(this);
+ }
+ },
+
+ onStart() {
+ Svc.Obs.add("domwindowopened", this.asyncObserver);
+ for (let win of Services.wm.getEnumerator("navigator:browser")) {
+ this._registerListenersForWindow(win);
+ }
+ },
+
+ onStop() {
+ Svc.Obs.remove("domwindowopened", this.asyncObserver);
+ for (let win of Services.wm.getEnumerator("navigator:browser")) {
+ this._unregisterListenersForWindow(win);
+ }
+ },
+
+ async observe(subject, topic, data) {
+ switch (topic) {
+ case "domwindowopened":
+ let onLoad = () => {
+ subject.removeEventListener("load", onLoad);
+ // Only register after the window is done loading to avoid unloads.
+ this._registerListenersForWindow(subject);
+ };
+
+ // Add tab listeners now that a window has opened.
+ subject.addEventListener("load", onLoad);
+ break;
+ }
+ },
+
+ onTab(event) {
+ if (event.originalTarget.linkedBrowser) {
+ let browser = event.originalTarget.linkedBrowser;
+ if (
+ lazy.PrivateBrowsingUtils.isBrowserPrivate(browser) &&
+ !lazy.PrivateBrowsingUtils.permanentPrivateBrowsing
+ ) {
+ this._log.trace("Ignoring tab event from private browsing.");
+ return;
+ }
+ }
+ this._log.trace("onTab event: " + event.type);
+
+ switch (event.type) {
+ case "TabOpen":
+ /* We do not have a reliable way of checking the URI on the TabOpen
+ * so we will rely on the other methods (onLocationChange, getAllTabsWithEstimatedMax)
+ * to filter these when going through sync
+ */
+ this.callScheduleSync(SCORE_INCREMENT_SMALL);
+ break;
+ case "TabClose":
+        // If the event target has `linkedBrowser`, it can be assumed to be a
+        // <tab> element; otherwise it's assumed to be a <browser> element and
+        // is used as-is.
+ const tab = event.target.linkedBrowser || event.target;
+
+ // TabClose means the tab has already loaded and we can check the URI
+ // and ignore if it's a scheme we don't care about
+ if (lazy.TABS_FILTERED_SCHEMES.has(tab.currentURI.scheme)) {
+ return;
+ }
+ this.callScheduleSync(SCORE_INCREMENT_SMALL);
+ break;
+ }
+ },
+
+ // web progress listeners.
+ onLocationChange(webProgress, request, locationURI, flags) {
+    // We only care about top-level location changes. We do want location
+    // changes in the same document, because pages using the `pushState()` API
+    // *appear* to stay in the same document even when the URL changes. It
+    // also doesn't hurt to accurately reflect fragment changes - so we allow
+    // LOCATION_CHANGE_SAME_DOCUMENT.
+ if (
+ flags & Ci.nsIWebProgressListener.LOCATION_CHANGE_RELOAD ||
+ !webProgress.isTopLevel ||
+ !locationURI
+ ) {
+ return;
+ }
+
+ // We can't filter out tabs that we don't sync here, because we might be
+ // navigating from a tab that we *did* sync to one we do not, and that
+ // tab we *did* sync should no longer be synced.
+ this.callScheduleSync();
+ },
+
+ callScheduleSync(scoreIncrement) {
+ this.modified = true;
+ let { scheduler } = this.engine.service;
+ let delayInMs = lazy.SYNC_AFTER_DELAY_MS;
+
+ // Schedule a sync once we detect a tab change
+ // to ensure the server always has the most up to date tabs
+ if (
+ delayInMs > 0 &&
+ scheduler.numClients > 1 // Only schedule quick syncs for multi client users
+ ) {
+ if (this.tabsQuickWriteTimer) {
+ this._log.debug(
+ "Detected a tab change, but a quick-write is already scheduled"
+ );
+ return;
+ }
+ this._log.debug(
+ "Detected a tab change: scheduling a quick-write in " + delayInMs + "ms"
+ );
+ CommonUtils.namedTimer(
+ () => {
+ this._log.trace("tab quick-sync timer fired.");
+ this.engine
+ .quickWrite()
+ .then(() => {
+ this._log.trace("tab quick-sync done.");
+ })
+ .catch(ex => {
+ this._log.error("tab quick-sync failed.", ex);
+ });
+ },
+ delayInMs,
+ this,
+ "tabsQuickWriteTimer"
+ );
+ } else if (scoreIncrement) {
+ this._log.debug(
+ "Detected a tab change, but conditions aren't met for a quick write - bumping score"
+ );
+ this.score += scoreIncrement;
+ } else {
+ this._log.debug(
+ "Detected a tab change, but conditions aren't met for a quick write or a score bump"
+ );
+ }
+ },
+};
+Object.setPrototypeOf(TabTracker.prototype, Tracker.prototype);
diff --git a/services/sync/modules/keys.sys.mjs b/services/sync/modules/keys.sys.mjs
new file mode 100644
index 0000000000..b6a1dce19a
--- /dev/null
+++ b/services/sync/modules/keys.sys.mjs
@@ -0,0 +1,166 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Weave } from "resource://services-sync/main.sys.mjs";
+
+/**
+ * Represents a pair of keys.
+ *
+ * Each key stored in a key bundle is 256 bits. One key is used for symmetric
+ * encryption. The other is used for HMAC.
+ *
+ * A KeyBundle by itself is just an anonymous pair of keys. Other types
+ * deriving from this one add semantics, such as associated collections or
+ * generating a key bundle via HKDF from another key.
+ */
+function KeyBundle() {
+ this._encrypt = null;
+ this._encryptB64 = null;
+ this._hmac = null;
+ this._hmacB64 = null;
+}
+KeyBundle.prototype = {
+ _encrypt: null,
+ _encryptB64: null,
+ _hmac: null,
+ _hmacB64: null,
+
+ equals: function equals(bundle) {
+ return (
+ bundle &&
+ bundle.hmacKey == this.hmacKey &&
+ bundle.encryptionKey == this.encryptionKey
+ );
+ },
+
+ /*
+ * Accessors for the two keys.
+ */
+ get encryptionKey() {
+ return this._encrypt;
+ },
+
+ set encryptionKey(value) {
+ if (!value || typeof value != "string") {
+ throw new Error("Encryption key can only be set to string values.");
+ }
+
+ if (value.length < 16) {
+ throw new Error("Encryption key must be at least 128 bits long.");
+ }
+
+ this._encrypt = value;
+ this._encryptB64 = btoa(value);
+ },
+
+ get encryptionKeyB64() {
+ return this._encryptB64;
+ },
+
+ get hmacKey() {
+ return this._hmac;
+ },
+
+ set hmacKey(value) {
+ if (!value || typeof value != "string") {
+ throw new Error("HMAC key can only be set to string values.");
+ }
+
+ if (value.length < 16) {
+ throw new Error("HMAC key must be at least 128 bits long.");
+ }
+
+ this._hmac = value;
+ this._hmacB64 = btoa(value);
+ },
+
+ get hmacKeyB64() {
+ return this._hmacB64;
+ },
+
+ /**
+ * Populate this key pair with 2 new, randomly generated keys.
+ */
+ async generateRandom() {
+    // Compute both at the same time.
+ let [generatedHMAC, generatedEncr] = await Promise.all([
+ Weave.Crypto.generateRandomKey(),
+ Weave.Crypto.generateRandomKey(),
+ ]);
+ this.keyPairB64 = [generatedEncr, generatedHMAC];
+ },
+};
+
+/**
+ * Represents a KeyBundle associated with a collection.
+ *
+ * This is just a KeyBundle with a collection attached.
+ */
+export function BulkKeyBundle(collection) {
+ let log = Log.repository.getLogger("Sync.BulkKeyBundle");
+ log.info("BulkKeyBundle being created for " + collection);
+ KeyBundle.call(this);
+
+ this._collection = collection;
+}
+
+BulkKeyBundle.fromHexKey = function (hexKey) {
+ let key = CommonUtils.hexToBytes(hexKey);
+ let bundle = new BulkKeyBundle();
+ // [encryptionKey, hmacKey]
+ bundle.keyPair = [key.slice(0, 32), key.slice(32, 64)];
+ return bundle;
+};
+
+BulkKeyBundle.fromJWK = function (jwk) {
+ if (!jwk || !jwk.k || jwk.kty !== "oct") {
+ throw new Error("Invalid JWK provided to BulkKeyBundle.fromJWK");
+ }
+ return BulkKeyBundle.fromHexKey(CommonUtils.base64urlToHex(jwk.k));
+};
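+// Illustrative usage (the hex key is hypothetical): a 64-byte key, i.e.
+// 128 hex characters, splits into a 32-byte encryption key and a 32-byte
+// HMAC key.
+//   let bundle = BulkKeyBundle.fromHexKey(hex128chars);
+//   let [encKey, hmacKey] = bundle.keyPair;
+// Or generate fresh random keys for a collection:
+//   let fresh = new BulkKeyBundle("bookmarks");
+//   await fresh.generateRandom();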
+
+BulkKeyBundle.prototype = {
+ get collection() {
+ return this._collection;
+ },
+
+ /**
+ * Obtain the key pair in this key bundle.
+ *
+ * The returned keys are represented as raw byte strings.
+ */
+ get keyPair() {
+ return [this.encryptionKey, this.hmacKey];
+ },
+
+ set keyPair(value) {
+ if (!Array.isArray(value) || value.length != 2) {
+ throw new Error("BulkKeyBundle.keyPair value must be array of 2 keys.");
+ }
+
+ this.encryptionKey = value[0];
+ this.hmacKey = value[1];
+ },
+
+ get keyPairB64() {
+ return [this.encryptionKeyB64, this.hmacKeyB64];
+ },
+
+ set keyPairB64(value) {
+ if (!Array.isArray(value) || value.length != 2) {
+ throw new Error(
+ "BulkKeyBundle.keyPairB64 value must be an array of 2 keys."
+ );
+ }
+
+ this.encryptionKey = CommonUtils.safeAtoB(value[0]);
+ this.hmacKey = CommonUtils.safeAtoB(value[1]);
+ },
+};
+
+Object.setPrototypeOf(BulkKeyBundle.prototype, KeyBundle.prototype);
diff --git a/services/sync/modules/main.sys.mjs b/services/sync/modules/main.sys.mjs
new file mode 100644
index 0000000000..838da15742
--- /dev/null
+++ b/services/sync/modules/main.sys.mjs
@@ -0,0 +1,23 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+export { lazy as Weave };
+
+const lazy = {};
+
+// We want these to be lazily loaded, which helps performance and also lets
+// tests avoid loading them before they are ready.
+ChromeUtils.defineESModuleGetters(lazy, {
+ Service: "resource://services-sync/service.sys.mjs",
+ Status: "resource://services-sync/status.sys.mjs",
+ Svc: "resource://services-sync/util.sys.mjs",
+ Utils: "resource://services-sync/util.sys.mjs",
+});
+
+ChromeUtils.defineLazyGetter(lazy, "Crypto", () => {
+ let { WeaveCrypto } = ChromeUtils.importESModule(
+ "resource://services-crypto/WeaveCrypto.sys.mjs"
+ );
+ return new WeaveCrypto();
+});
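+// Sketch of the typical consumer pattern: import the lazy namespace and only
+// touch members when needed, so the underlying modules load on first use
+// (the `why` value is just an example):
+//   import { Weave } from "resource://services-sync/main.sys.mjs";
+//   Weave.Service.sync({ why: "user" });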
diff --git a/services/sync/modules/policies.sys.mjs b/services/sync/modules/policies.sys.mjs
new file mode 100644
index 0000000000..290e793b8e
--- /dev/null
+++ b/services/sync/modules/policies.sys.mjs
@@ -0,0 +1,1055 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import {
+ CREDENTIALS_CHANGED,
+ ENGINE_APPLY_FAIL,
+ ENGINE_UNKNOWN_FAIL,
+ IDLE_OBSERVER_BACK_DELAY,
+ LOGIN_FAILED_INVALID_PASSPHRASE,
+ LOGIN_FAILED_LOGIN_REJECTED,
+ LOGIN_FAILED_NETWORK_ERROR,
+ LOGIN_FAILED_NO_PASSPHRASE,
+ LOGIN_SUCCEEDED,
+ MASTER_PASSWORD_LOCKED,
+ MASTER_PASSWORD_LOCKED_RETRY_INTERVAL,
+ MAX_ERROR_COUNT_BEFORE_BACKOFF,
+ MINIMUM_BACKOFF_INTERVAL,
+ MULTI_DEVICE_THRESHOLD,
+ NO_SYNC_NODE_FOUND,
+ NO_SYNC_NODE_INTERVAL,
+ OVER_QUOTA,
+ RESPONSE_OVER_QUOTA,
+ SCORE_UPDATE_DELAY,
+ SERVER_MAINTENANCE,
+ SINGLE_USER_THRESHOLD,
+ STATUS_OK,
+ SYNC_FAILED_PARTIAL,
+ SYNC_SUCCEEDED,
+ kSyncBackoffNotMet,
+ kSyncMasterPasswordLocked,
+} from "resource://services-sync/constants.sys.mjs";
+
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import { logManager } from "resource://gre/modules/FxAccountsCommon.sys.mjs";
+import { Async } from "resource://services-common/async.sys.mjs";
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ AddonManager: "resource://gre/modules/AddonManager.sys.mjs",
+ Status: "resource://services-sync/status.sys.mjs",
+});
+ChromeUtils.defineLazyGetter(lazy, "fxAccounts", () => {
+ return ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+ ).getFxAccountsSingleton();
+});
+XPCOMUtils.defineLazyServiceGetter(
+ lazy,
+ "IdleService",
+ "@mozilla.org/widget/useridleservice;1",
+ "nsIUserIdleService"
+);
+XPCOMUtils.defineLazyServiceGetter(
+ lazy,
+ "CaptivePortalService",
+ "@mozilla.org/network/captive-portal-service;1",
+ "nsICaptivePortalService"
+);
+
+// Get the value for an interval that's stored in preferences. To save users
+// from themselves (and us from them!) the minimum time they can specify
+// is 60s.
+function getThrottledIntervalPreference(prefName) {
+ return Math.max(Svc.PrefBranch.getIntPref(prefName), 60) * 1000;
+}
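+// E.g. a pref value of 30 (seconds) is clamped and returned as 60000 ms,
+// while a value of 600 passes through as 600000 ms.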
+
+export function SyncScheduler(service) {
+ this.service = service;
+ this.init();
+}
+
+SyncScheduler.prototype = {
+ _log: Log.repository.getLogger("Sync.SyncScheduler"),
+
+ _fatalLoginStatus: [
+ LOGIN_FAILED_NO_PASSPHRASE,
+ LOGIN_FAILED_INVALID_PASSPHRASE,
+ LOGIN_FAILED_LOGIN_REJECTED,
+ ],
+
+ /**
+ * The nsITimer object that schedules the next sync. See scheduleNextSync().
+ */
+ syncTimer: null,
+
+ setDefaults: function setDefaults() {
+ this._log.trace("Setting SyncScheduler policy values to defaults.");
+
+ this.singleDeviceInterval = getThrottledIntervalPreference(
+ "scheduler.fxa.singleDeviceInterval"
+ );
+ this.idleInterval = getThrottledIntervalPreference(
+ "scheduler.idleInterval"
+ );
+ this.activeInterval = getThrottledIntervalPreference(
+ "scheduler.activeInterval"
+ );
+ this.immediateInterval = getThrottledIntervalPreference(
+ "scheduler.immediateInterval"
+ );
+
+ // A user is non-idle on startup by default.
+ this.idle = false;
+
+ this.hasIncomingItems = false;
+ // This is the last number of clients we saw when previously updating the
+ // client mode. If this != currentNumClients (obtained from prefs written
+    // by the clients engine) then we need to transition between single and
+    // multi-device mode.
+ this.numClientsLastSync = 0;
+
+ this._resyncs = 0;
+
+ this.clearSyncTriggers();
+ },
+
+ // nextSync is in milliseconds, but prefs can't hold that much
+ get nextSync() {
+ return Svc.PrefBranch.getIntPref("nextSync", 0) * 1000;
+ },
+ set nextSync(value) {
+ Svc.PrefBranch.setIntPref("nextSync", Math.floor(value / 1000));
+ },
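+  // Why seconds: int prefs are 32-bit, and a milliseconds-since-epoch value
+  // (Date.now() is around 1.7e12) overflows the 2^31 - 1 (about 2.1e9)
+  // ceiling, while the same moment in seconds (about 1.7e9) still fits.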
+
+ get missedFxACommandsFetchInterval() {
+ return Services.prefs.getIntPref(
+ "identity.fxaccounts.commands.missed.fetch_interval"
+ );
+ },
+
+ get missedFxACommandsLastFetch() {
+ return Services.prefs.getIntPref(
+ "identity.fxaccounts.commands.missed.last_fetch",
+ 0
+ );
+ },
+
+ set missedFxACommandsLastFetch(val) {
+ Services.prefs.setIntPref(
+ "identity.fxaccounts.commands.missed.last_fetch",
+ val
+ );
+ },
+
+ get syncInterval() {
+ return this._syncInterval;
+ },
+ set syncInterval(value) {
+ if (value != this._syncInterval) {
+ Services.prefs.setIntPref("services.sync.syncInterval", value);
+ }
+ },
+
+ get syncThreshold() {
+ return this._syncThreshold;
+ },
+ set syncThreshold(value) {
+ if (value != this._syncThreshold) {
+ Services.prefs.setIntPref("services.sync.syncThreshold", value);
+ }
+ },
+
+ get globalScore() {
+ return this._globalScore;
+ },
+ set globalScore(value) {
+ if (this._globalScore != value) {
+ Services.prefs.setIntPref("services.sync.globalScore", value);
+ }
+ },
+
+ // Managed by the clients engine (by way of prefs)
+ get numClients() {
+ return this.numDesktopClients + this.numMobileClients;
+ },
+ set numClients(value) {
+ throw new Error("Don't set numClients - the clients engine manages it.");
+ },
+
+ get offline() {
+ // Services.io.offline has slowly become fairly useless over the years - it
+ // no longer attempts to track the actual network state by default, but one
+ // thing stays true: if it says we're offline then we are definitely not online.
+ //
+ // We also ask the captive portal service if we are behind a locked captive
+ // portal.
+ //
+ // We don't check on the NetworkLinkService however, because it gave us
+ // false positives in the past in a vm environment.
+ try {
+ if (
+ Services.io.offline ||
+ lazy.CaptivePortalService.state ==
+ lazy.CaptivePortalService.LOCKED_PORTAL
+ ) {
+ return true;
+ }
+ } catch (ex) {
+ this._log.warn("Could not determine network status.", ex);
+ }
+ return false;
+ },
+
+ _initPrefGetters() {
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "idleTime",
+ "services.sync.scheduler.idleTime"
+ );
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "maxResyncs",
+ "services.sync.maxResyncs",
+ 0
+ );
+
+ // The number of clients we have is maintained in preferences via the
+    // clients engine, and only updated after a successful sync.
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "numDesktopClients",
+ "services.sync.clients.devices.desktop",
+ 0
+ );
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "numMobileClients",
+ "services.sync.clients.devices.mobile",
+ 0
+ );
+
+ // Scheduler state that seems to be read more often than it's written.
+ // We also check if the value has changed before writing in the setters.
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_syncThreshold",
+ "services.sync.syncThreshold",
+ SINGLE_USER_THRESHOLD
+ );
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_syncInterval",
+ "services.sync.syncInterval",
+ this.singleDeviceInterval
+ );
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_globalScore",
+ "services.sync.globalScore",
+ 0
+ );
+ },
+
+ init: function init() {
+ this._log.manageLevelFromPref("services.sync.log.logger.service.main");
+ this.setDefaults();
+ this._initPrefGetters();
+ Svc.Obs.add("weave:engine:score:updated", this);
+ Svc.Obs.add("network:offline-status-changed", this);
+ Svc.Obs.add("network:link-status-changed", this);
+ Svc.Obs.add("captive-portal-detected", this);
+ Svc.Obs.add("weave:service:sync:start", this);
+ Svc.Obs.add("weave:service:sync:finish", this);
+ Svc.Obs.add("weave:engine:sync:finish", this);
+ Svc.Obs.add("weave:engine:sync:error", this);
+ Svc.Obs.add("weave:service:login:error", this);
+ Svc.Obs.add("weave:service:logout:finish", this);
+ Svc.Obs.add("weave:service:sync:error", this);
+ Svc.Obs.add("weave:service:backoff:interval", this);
+ Svc.Obs.add("weave:engine:sync:applied", this);
+ Svc.Obs.add("weave:service:setup-complete", this);
+ Svc.Obs.add("weave:service:start-over", this);
+ Svc.Obs.add("FxA:hawk:backoff:interval", this);
+
+ if (lazy.Status.checkSetup() == STATUS_OK) {
+ Svc.Obs.add("wake_notification", this);
+ Svc.Obs.add("captive-portal-login-success", this);
+ Svc.Obs.add("sleep_notification", this);
+ lazy.IdleService.addIdleObserver(this, this.idleTime);
+ }
+ },
+
+ // eslint-disable-next-line complexity
+ observe: function observe(subject, topic, data) {
+ this._log.trace("Handling " + topic);
+ switch (topic) {
+ case "weave:engine:score:updated":
+ if (lazy.Status.login == LOGIN_SUCCEEDED) {
+ CommonUtils.namedTimer(
+ this.calculateScore,
+ SCORE_UPDATE_DELAY,
+ this,
+ "_scoreTimer"
+ );
+ }
+ break;
+ case "network:link-status-changed":
+ // Note: NetworkLinkService is unreliable, we get false negatives for it
+ // in cases such as VMs (bug 1420802), so we don't want to use it in
+ // `get offline`, but we assume that it's probably reliable if we're
+ // getting status changed events. (We might be wrong about this, but
+ // if that's true, then the only downside is that we won't sync as
+ // promptly).
+ let isOffline = this.offline;
+ this._log.debug(
+ `Network link status changed to "${data}". Offline?`,
+ isOffline
+ );
+ // Data may be one of `up`, `down`, `change`, or `unknown`. We only want
+ // to sync if it's "up".
+ if (data == "up" && !isOffline) {
+ this._log.debug("Network link looks up. Syncing.");
+ this.scheduleNextSync(0, { why: topic });
+ } else if (data == "down") {
+ // Unschedule pending syncs if we know we're going down. We don't do
+ // this via `checkSyncStatus`, since link status isn't reflected in
+ // `this.offline`.
+ this.clearSyncTriggers();
+ }
+ break;
+ case "network:offline-status-changed":
+ case "captive-portal-detected":
+ // Whether online or offline, we'll reschedule syncs
+ this._log.trace("Network offline status change: " + data);
+ this.checkSyncStatus();
+ break;
+ case "weave:service:sync:start":
+ // Clear out any potentially pending syncs now that we're syncing
+ this.clearSyncTriggers();
+
+        // Reset backoff info; if the server tells us to continue backing off,
+        // we'll handle that later.
+ lazy.Status.resetBackoff();
+
+ this.globalScore = 0;
+ break;
+ case "weave:service:sync:finish":
+ this.nextSync = 0;
+ this.adjustSyncInterval();
+
+ if (
+ lazy.Status.service == SYNC_FAILED_PARTIAL &&
+ this.requiresBackoff
+ ) {
+ this.requiresBackoff = false;
+ this.handleSyncError();
+ return;
+ }
+
+ let sync_interval;
+ let nextSyncReason = "schedule";
+ this.updateGlobalScore();
+ if (
+ this.globalScore > this.syncThreshold &&
+ lazy.Status.service == STATUS_OK
+ ) {
+ // The global score should be 0 after a sync. If it's not, either
+          // items were changed during the last sync (and we should schedule an
+          // immediate follow-up sync), or an engine skipped syncing.
+ this._resyncs++;
+ if (this._resyncs <= this.maxResyncs) {
+ sync_interval = 0;
+ nextSyncReason = "resync";
+ } else {
+ this._log.warn(
+ `Resync attempt ${this._resyncs} exceeded ` +
+ `maximum ${this.maxResyncs}`
+ );
+ Svc.Obs.notify("weave:service:resyncs-finished");
+ }
+ } else {
+ this._resyncs = 0;
+ Svc.Obs.notify("weave:service:resyncs-finished");
+ }
+
+ this._syncErrors = 0;
+ if (lazy.Status.sync == NO_SYNC_NODE_FOUND) {
+ // If we don't have a Sync node, override the interval, even if we've
+ // scheduled a follow-up sync.
+ this._log.trace("Scheduling a sync at interval NO_SYNC_NODE_FOUND.");
+ sync_interval = NO_SYNC_NODE_INTERVAL;
+ }
+ this.scheduleNextSync(sync_interval, { why: nextSyncReason });
+ break;
+ case "weave:engine:sync:finish":
+ if (data == "clients") {
+ // Update the client mode because it might change what we sync.
+ this.updateClientMode();
+ }
+ break;
+ case "weave:engine:sync:error":
+ // `subject` is the exception thrown by an engine's sync() method.
+ let exception = subject;
+ if (exception.status >= 500 && exception.status <= 504) {
+ this.requiresBackoff = true;
+ }
+ break;
+ case "weave:service:login:error":
+ this.clearSyncTriggers();
+
+ if (lazy.Status.login == MASTER_PASSWORD_LOCKED) {
+ // Try again later, just as if we threw an error... only without the
+ // error count.
+ this._log.debug("Couldn't log in: master password is locked.");
+ this._log.trace(
+ "Scheduling a sync at MASTER_PASSWORD_LOCKED_RETRY_INTERVAL"
+ );
+ this.scheduleAtInterval(MASTER_PASSWORD_LOCKED_RETRY_INTERVAL);
+ } else if (!this._fatalLoginStatus.includes(lazy.Status.login)) {
+ // Not a fatal login error, just an intermittent network or server
+ // issue. Keep on syncin'.
+ this.checkSyncStatus();
+ }
+ break;
+ case "weave:service:logout:finish":
+ // Start or cancel the sync timer depending on if
+ // logged in or logged out
+ this.checkSyncStatus();
+ break;
+ case "weave:service:sync:error":
+ // There may be multiple clients but if the sync fails, client mode
+ // should still be updated so that the next sync has a correct interval.
+ this.updateClientMode();
+ this.adjustSyncInterval();
+ this.nextSync = 0;
+ this.handleSyncError();
+ break;
+ case "FxA:hawk:backoff:interval":
+ case "weave:service:backoff:interval":
+ let requested_interval = subject * 1000;
+ this._log.debug(
+ "Got backoff notification: " + requested_interval + "ms"
+ );
+        // Leave up to 25% more time for the backoff.
+ let interval = requested_interval * (1 + Math.random() * 0.25);
+ lazy.Status.backoffInterval = interval;
+ lazy.Status.minimumNextSync = Date.now() + requested_interval;
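+        // E.g. a server-requested 5000 ms backoff becomes a fuzzed interval
+        // in [5000, 6250) ms, spreading retries across clients, while the
+        // unfuzzed value still bounds minimumNextSync.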
+ this._log.debug(
+ "Fuzzed minimum next sync: " + lazy.Status.minimumNextSync
+ );
+ break;
+ case "weave:engine:sync:applied":
+ let numItems = subject.succeeded;
+ this._log.trace(
+ "Engine " + data + " successfully applied " + numItems + " items."
+ );
+ // Bug 1800186 - the tabs engine always reports incoming items, so we don't
+ // want special scheduling in this scenario.
+ // (However, even when we fix the underlying cause of that, we probably still can
+ // ignore tabs here - new incoming tabs don't need to trigger the extra syncs we do
+ // based on this flag.)
+ if (data != "tabs" && numItems) {
+ this.hasIncomingItems = true;
+ }
+ if (subject.newFailed) {
+ this._log.error(
+ `Engine ${data} found ${subject.newFailed} new records that failed to apply`
+ );
+ }
+ break;
+ case "weave:service:setup-complete":
+ Services.prefs.savePrefFile(null);
+ lazy.IdleService.addIdleObserver(this, this.idleTime);
+ Svc.Obs.add("wake_notification", this);
+ Svc.Obs.add("captive-portal-login-success", this);
+ Svc.Obs.add("sleep_notification", this);
+ break;
+ case "weave:service:start-over":
+ this.setDefaults();
+ try {
+ lazy.IdleService.removeIdleObserver(this, this.idleTime);
+ } catch (ex) {
+ if (ex.result != Cr.NS_ERROR_FAILURE) {
+ throw ex;
+ }
+ // In all likelihood we didn't have an idle observer registered yet.
+ // It's all good.
+ }
+ break;
+ case "idle":
+ this._log.trace("We're idle.");
+ this.idle = true;
+ // Adjust the interval for future syncs. This won't actually have any
+ // effect until the next pending sync (which will happen soon since we
+ // were just active.)
+ this.adjustSyncInterval();
+ break;
+ case "active":
+ this._log.trace("Received notification that we're back from idle.");
+ this.idle = false;
+ CommonUtils.namedTimer(
+ function onBack() {
+ if (this.idle) {
+ this._log.trace(
+ "... and we're idle again. " +
+ "Ignoring spurious back notification."
+ );
+ return;
+ }
+
+ this._log.trace("Genuine return from idle. Syncing.");
+ // Trigger a sync if we have multiple clients.
+ if (this.numClients > 1) {
+ this.scheduleNextSync(0, { why: topic });
+ }
+ },
+ IDLE_OBSERVER_BACK_DELAY,
+ this,
+ "idleDebouncerTimer"
+ );
+ break;
+ case "wake_notification":
+ this._log.debug("Woke from sleep.");
+ CommonUtils.nextTick(() => {
+ // Trigger a sync if we have multiple clients. We give it 2 seconds
+ // so the browser can recover from the wake and do more important
+ // operations first (timers etc).
+ if (this.numClients > 1) {
+ if (!this.offline) {
+ this._log.debug("Online, will sync in 2s.");
+ this.scheduleNextSync(2000, { why: topic });
+ }
+ }
+ });
+ break;
+ case "captive-portal-login-success":
+ this._log.debug("Captive portal login success. Scheduling a sync.");
+ CommonUtils.nextTick(() => {
+ this.scheduleNextSync(3000, { why: topic });
+ });
+ break;
+ case "sleep_notification":
+ if (this.service.engineManager.get("tabs")._tracker.modified) {
+ this._log.debug("Going to sleep, doing a quick sync.");
+ this.scheduleNextSync(0, { engines: ["tabs"], why: "sleep" });
+ }
+ break;
+ }
+ },
+
+ adjustSyncInterval: function adjustSyncInterval() {
+ if (this.numClients <= 1) {
+ this._log.trace("Adjusting syncInterval to singleDeviceInterval.");
+ this.syncInterval = this.singleDeviceInterval;
+ return;
+ }
+
+ // Only MULTI_DEVICE clients will enter this if statement
+ // since SINGLE_USER clients will be handled above.
+ if (this.idle) {
+ this._log.trace("Adjusting syncInterval to idleInterval.");
+ this.syncInterval = this.idleInterval;
+ return;
+ }
+
+ if (this.hasIncomingItems) {
+ this._log.trace("Adjusting syncInterval to immediateInterval.");
+ this.hasIncomingItems = false;
+ this.syncInterval = this.immediateInterval;
+ } else {
+ this._log.trace("Adjusting syncInterval to activeInterval.");
+ this.syncInterval = this.activeInterval;
+ }
+ },
+
+ updateGlobalScore() {
+ let engines = [this.service.clientsEngine].concat(
+ this.service.engineManager.getEnabled()
+ );
+ let globalScore = this.globalScore;
+ for (let i = 0; i < engines.length; i++) {
+ this._log.trace(engines[i].name + ": score: " + engines[i].score);
+ globalScore += engines[i].score;
+ engines[i]._tracker.resetScore();
+ }
+ this.globalScore = globalScore;
+ this._log.trace("Global score updated: " + globalScore);
+ },
+
+ calculateScore() {
+ this.updateGlobalScore();
+ this.checkSyncStatus();
+ },
+
+ /**
+ * Query the number of known clients to figure out what mode to be in
+ */
+ updateClientMode: function updateClientMode() {
+ // Nothing to do if it's the same amount
+ let numClients = this.numClients;
+ if (numClients == this.numClientsLastSync) {
+ return;
+ }
+
+ this._log.debug(
+ `Client count: ${this.numClientsLastSync} -> ${numClients}`
+ );
+ this.numClientsLastSync = numClients;
+
+ if (numClients <= 1) {
+ this._log.trace("Adjusting syncThreshold to SINGLE_USER_THRESHOLD");
+ this.syncThreshold = SINGLE_USER_THRESHOLD;
+ } else {
+ this._log.trace("Adjusting syncThreshold to MULTI_DEVICE_THRESHOLD");
+ this.syncThreshold = MULTI_DEVICE_THRESHOLD;
+ }
+ this.adjustSyncInterval();
+ },
+
+ /**
+ * Check if we should be syncing and schedule the next sync, if it's not scheduled
+ */
+ checkSyncStatus: function checkSyncStatus() {
+    // Should we be syncing now? If not, cancel any sync timers and return.
+    // If we're in backoff, we'll schedule the next sync.
+ let ignore = [kSyncBackoffNotMet, kSyncMasterPasswordLocked];
+ let skip = this.service._checkSync(ignore);
+ this._log.trace('_checkSync returned "' + skip + '".');
+ if (skip) {
+ this.clearSyncTriggers();
+ return;
+ }
+
+ let why = "schedule";
+ // Only set the wait time to 0 if we need to sync right away
+ let wait;
+ if (this.globalScore > this.syncThreshold) {
+ this._log.debug("Global Score threshold hit, triggering sync.");
+ wait = 0;
+ why = "score";
+ }
+ this.scheduleNextSync(wait, { why });
+ },
+
+ /**
+ * Call sync() if Master Password is not locked.
+ *
+ * Otherwise, reschedule a sync for later.
+ */
+ syncIfMPUnlocked(engines, why) {
+ // No point if we got kicked out by the master password dialog.
+ if (lazy.Status.login == MASTER_PASSWORD_LOCKED && Utils.mpLocked()) {
+ this._log.debug(
+ "Not initiating sync: Login status is " + lazy.Status.login
+ );
+
+ // If we're not syncing now, we need to schedule the next one.
+ this._log.trace(
+ "Scheduling a sync at MASTER_PASSWORD_LOCKED_RETRY_INTERVAL"
+ );
+ this.scheduleAtInterval(MASTER_PASSWORD_LOCKED_RETRY_INTERVAL);
+ return;
+ }
+
+ if (!Async.isAppReady()) {
+ this._log.debug("Not initiating sync: app is shutting down");
+ return;
+ }
+ Services.tm.dispatchToMainThread(() => {
+ this.service.sync({ engines, why });
+ const now = Math.round(new Date().getTime() / 1000);
+ // Only fetch missed messages in a "scheduled" sync so we don't race against
+ // the Push service reconnecting on a network link change for example.
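+      // E.g. with a (hypothetical) fetch_interval of 3600 seconds, device
+      // commands are polled at most once an hour, and only from scheduled
+      // syncs.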
+ if (
+ why == "schedule" &&
+ now >=
+ this.missedFxACommandsLastFetch + this.missedFxACommandsFetchInterval
+ ) {
+ lazy.fxAccounts.commands
+ .pollDeviceCommands()
+ .then(() => {
+ this.missedFxACommandsLastFetch = now;
+ })
+ .catch(e => {
+ this._log.error("Fetching missed remote commands failed.", e);
+ });
+ }
+ });
+ },
+
+ /**
+ * Set a timer for the next sync
+ */
+ scheduleNextSync(interval, { engines = null, why = null } = {}) {
+ // If no interval was specified, use the current sync interval.
+ if (interval == null) {
+ interval = this.syncInterval;
+ }
+
+ // Ensure the interval is set to no less than the backoff.
+ if (lazy.Status.backoffInterval && interval < lazy.Status.backoffInterval) {
+ this._log.trace(
+ "Requested interval " +
+ interval +
+ " ms is smaller than the backoff interval. " +
+ "Using backoff interval " +
+ lazy.Status.backoffInterval +
+ " ms instead."
+ );
+ interval = lazy.Status.backoffInterval;
+ }
+ let nextSync = this.nextSync;
+ if (nextSync != 0) {
+ // There's already a sync scheduled. Don't reschedule if there's already
+ // a timer scheduled for sooner than requested.
+ let currentInterval = nextSync - Date.now();
+ this._log.trace(
+ "There's already a sync scheduled in " + currentInterval + " ms."
+ );
+ if (currentInterval < interval && this.syncTimer) {
+ this._log.trace(
+ "Ignoring scheduling request for next sync in " + interval + " ms."
+ );
+ return;
+ }
+ }
+
+ // Start the sync right away if we're already late.
+ if (interval <= 0) {
+ this._log.trace(`Requested sync should happen right away. (why=${why})`);
+ this.syncIfMPUnlocked(engines, why);
+ return;
+ }
+
+ this._log.debug(`Next sync in ${interval} ms. (why=${why})`);
+ CommonUtils.namedTimer(
+ () => {
+ this.syncIfMPUnlocked(engines, why);
+ },
+ interval,
+ this,
+ "syncTimer"
+ );
+
+    // Save the next sync time in case sync is disabled (logout/offline/etc.)
+ this.nextSync = Date.now() + interval;
+ },
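+  // Behaviour sketch: scheduleNextSync(0) syncs immediately (subject to the
+  // backoff floor above), scheduleNextSync(null) falls back to the current
+  // syncInterval, and a request is ignored if a sooner timer already exists.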
+
+ /**
+ * Incorporates the backoff/retry logic used in error handling and elective
+ * non-syncing.
+ */
+ scheduleAtInterval: function scheduleAtInterval(minimumInterval) {
+ let interval = Utils.calculateBackoff(
+ this._syncErrors,
+ MINIMUM_BACKOFF_INTERVAL,
+ lazy.Status.backoffInterval
+ );
+ if (minimumInterval) {
+ interval = Math.max(minimumInterval, interval);
+ }
+
+ this._log.debug(
+ "Starting client-initiated backoff. Next sync in " + interval + " ms."
+ );
+ this.scheduleNextSync(interval, { why: "client-backoff-schedule" });
+ },
+
+ autoConnect: function autoConnect() {
+ if (this.service._checkSetup() == STATUS_OK && !this.service._checkSync()) {
+ // Schedule a sync based on when a previous sync was scheduled.
+ // scheduleNextSync() will do the right thing if that time lies in
+ // the past.
+ this.scheduleNextSync(this.nextSync - Date.now(), { why: "startup" });
+ }
+ },
+
+ _syncErrors: 0,
+ /**
+ * Deal with sync errors appropriately
+ */
+ handleSyncError: function handleSyncError() {
+ this._log.trace("In handleSyncError. Error count: " + this._syncErrors);
+ this._syncErrors++;
+
+ // Do nothing on the first couple of failures, if we're not in
+ // backoff due to 5xx errors.
+ if (!lazy.Status.enforceBackoff) {
+ if (this._syncErrors < MAX_ERROR_COUNT_BEFORE_BACKOFF) {
+ this.scheduleNextSync(null, { why: "reschedule" });
+ return;
+ }
+ this._log.debug(
+ "Sync error count has exceeded " +
+ MAX_ERROR_COUNT_BEFORE_BACKOFF +
+ "; enforcing backoff."
+ );
+ lazy.Status.enforceBackoff = true;
+ }
+
+ this.scheduleAtInterval();
+ },
+
+ /**
+ * Remove any timers/observers that might trigger a sync
+ */
+ clearSyncTriggers: function clearSyncTriggers() {
+ this._log.debug("Clearing sync triggers and the global score.");
+ this.globalScore = this.nextSync = 0;
+
+ // Clear out any scheduled syncs
+ if (this.syncTimer) {
+ this.syncTimer.clear();
+ }
+ },
+};
+
+export function ErrorHandler(service) {
+ this.service = service;
+ this.init();
+}
+
+ErrorHandler.prototype = {
+ init() {
+ Svc.Obs.add("weave:engine:sync:applied", this);
+ Svc.Obs.add("weave:engine:sync:error", this);
+ Svc.Obs.add("weave:service:login:error", this);
+ Svc.Obs.add("weave:service:sync:error", this);
+ Svc.Obs.add("weave:service:sync:finish", this);
+ Svc.Obs.add("weave:service:start-over:finish", this);
+
+ this.initLogs();
+ },
+
+ initLogs: function initLogs() {
+ // Set the root Sync logger level based on a pref. All other logs will
+ // inherit this level unless they specifically override it.
+ Log.repository
+ .getLogger("Sync")
+ .manageLevelFromPref(`services.sync.log.logger`);
+ // And allow our specific log to have a custom level via a pref.
+ this._log = Log.repository.getLogger("Sync.ErrorHandler");
+ this._log.manageLevelFromPref("services.sync.log.logger.service.main");
+ },
+
+ observe(subject, topic, data) {
+ this._log.trace("Handling " + topic);
+ switch (topic) {
+ case "weave:engine:sync:applied":
+ if (subject.newFailed) {
+ // An engine isn't able to apply one or more incoming records.
+ // We don't fail hard on this, but it usually indicates a bug,
+          // so for now treat it as a sync error (cf. Service._syncEngine()).
+ lazy.Status.engines = [data, ENGINE_APPLY_FAIL];
+ this._log.debug(data + " failed to apply some records.");
+ }
+ break;
+ case "weave:engine:sync:error": {
+ let exception = subject; // exception thrown by engine's sync() method
+ let engine_name = data; // engine name that threw the exception
+
+ this.checkServerError(exception);
+
+ lazy.Status.engines = [
+ engine_name,
+ exception.failureCode || ENGINE_UNKNOWN_FAIL,
+ ];
+ if (Async.isShutdownException(exception)) {
+ this._log.debug(
+ engine_name +
+ " was interrupted due to the application shutting down"
+ );
+ } else {
+ this._log.debug(engine_name + " failed", exception);
+ }
+ break;
+ }
+ case "weave:service:login:error":
+ this._log.error("Sync encountered a login error");
+ this.resetFileLog();
+ break;
+ case "weave:service:sync:error": {
+ if (lazy.Status.sync == CREDENTIALS_CHANGED) {
+ this.service.logout();
+ }
+
+ let exception = subject;
+ if (Async.isShutdownException(exception)) {
+ // If we are shutting down we just log the fact, attempt to flush
+ // the log file and get out of here!
+ this._log.error(
+ "Sync was interrupted due to the application shutting down"
+ );
+ this.resetFileLog();
+ break;
+ }
+
+ // Not a shutdown related exception...
+ this._log.error("Sync encountered an error", exception);
+ this.resetFileLog();
+ break;
+ }
+ case "weave:service:sync:finish":
+ this._log.trace("Status.service is " + lazy.Status.service);
+
+ // Check both of these status codes: in the event of a failure in one
+ // engine, Status.service will be SYNC_FAILED_PARTIAL despite
+ // Status.sync being SYNC_SUCCEEDED.
+ // *facepalm*
+ if (
+ lazy.Status.sync == SYNC_SUCCEEDED &&
+ lazy.Status.service == STATUS_OK
+ ) {
+ // Great. Let's clear our mid-sync 401 note.
+ this._log.trace("Clearing lastSyncReassigned.");
+ Svc.PrefBranch.clearUserPref("lastSyncReassigned");
+ }
+
+ if (lazy.Status.service == SYNC_FAILED_PARTIAL) {
+ this._log.error("Some engines did not sync correctly.");
+ }
+ this.resetFileLog();
+ break;
+ case "weave:service:start-over:finish":
+ // ensure we capture any logs between the last sync and the reset completing.
+ this.resetFileLog()
+ .then(() => {
+            // For privacy reasons we also delete all logs (though a preference
+            // allows keeping them to help with debugging).
+ if (!Svc.PrefBranch.getBoolPref("log.keepLogsOnReset", false)) {
+ return logManager.removeAllLogs().then(() => {
+ Svc.Obs.notify("weave:service:remove-file-log");
+ });
+ }
+ return null;
+ })
+ .catch(err => {
+ // So we failed to delete the logs - take the ironic option of
+ // writing this error to the logs we failed to delete!
+ this._log.error("Failed to delete logs on reset", err);
+ });
+ break;
+ }
+ },
+
+ async _dumpAddons() {
+ // Just dump the items that sync may be concerned with. Specifically,
+ // active extensions that are not hidden.
+ let addons = [];
+ try {
+ addons = await lazy.AddonManager.getAddonsByTypes(["extension"]);
+ } catch (e) {
+ this._log.warn("Failed to dump addons", e);
+ }
+
+ let relevantAddons = addons.filter(x => x.isActive && !x.hidden);
+ this._log.trace("Addons installed", relevantAddons.length);
+ for (let addon of relevantAddons) {
+ this._log.trace(" - ${name}, version ${version}, id ${id}", addon);
+ }
+ },
+
+ /**
+ * Generate a log file for the sync that just completed
+ * and refresh the input & output streams.
+ */
+ async resetFileLog() {
+ // If we're writing an error log, dump extensions that may be causing problems.
+ if (logManager.sawError) {
+ await this._dumpAddons();
+ }
+ const logType = await logManager.resetFileLog();
+ if (logType == logManager.ERROR_LOG_WRITTEN) {
+ console.error(
+ "Sync encountered an error - see about:sync-log for the log file."
+ );
+ }
+ Svc.Obs.notify("weave:service:reset-file-log");
+ },
+
+ /**
+ * Handle HTTP response results or exceptions and set the appropriate
+ * Status.* bits.
+ *
+ * This method also looks for "side-channel" warnings.
+ */
+ checkServerError(resp) {
+ // In this case we were passed a resolved value of Resource#_doRequest.
+ switch (resp.status) {
+ case 400:
+ if (resp == RESPONSE_OVER_QUOTA) {
+ lazy.Status.sync = OVER_QUOTA;
+ }
+ break;
+
+ case 401:
+ this.service.logout();
+ this._log.info("Got 401 response; resetting clusterURL.");
+ this.service.clusterURL = null;
+
+ let delay = 0;
+ if (Svc.PrefBranch.getBoolPref("lastSyncReassigned", false)) {
+ // We got a 401 in the middle of the previous sync, and we just got
+ // another. Login must have succeeded in order for us to get here, so
+ // the password should be correct.
+ // This is likely to be an intermittent server issue, so back off and
+ // give it time to recover.
+ this._log.warn("Last sync also failed for 401. Delaying next sync.");
+ delay = MINIMUM_BACKOFF_INTERVAL;
+ } else {
+ this._log.debug("New mid-sync 401 failure. Making a note.");
+ Svc.PrefBranch.setBoolPref("lastSyncReassigned", true);
+ }
+ this._log.info("Attempting to schedule another sync.");
+ this.service.scheduler.scheduleNextSync(delay, { why: "reschedule" });
+ break;
+
+ case 500:
+ case 502:
+ case 503:
+ case 504:
+ lazy.Status.enforceBackoff = true;
+ if (resp.status == 503 && resp.headers["retry-after"]) {
+ let retryAfter = resp.headers["retry-after"];
+ this._log.debug("Got Retry-After: " + retryAfter);
+ if (this.service.isLoggedIn) {
+ lazy.Status.sync = SERVER_MAINTENANCE;
+ } else {
+ lazy.Status.login = SERVER_MAINTENANCE;
+ }
+ Svc.Obs.notify(
+ "weave:service:backoff:interval",
+ parseInt(retryAfter, 10)
+ );
+ }
+ break;
+ }
+
+ // In this other case we were passed a rejection value.
+ switch (resp.result) {
+ case Cr.NS_ERROR_UNKNOWN_HOST:
+ case Cr.NS_ERROR_CONNECTION_REFUSED:
+ case Cr.NS_ERROR_NET_TIMEOUT:
+ case Cr.NS_ERROR_NET_RESET:
+ case Cr.NS_ERROR_NET_INTERRUPT:
+ case Cr.NS_ERROR_PROXY_CONNECTION_REFUSED:
+        // The constant says it's about login, but in fact it just
+        // indicates a general network error.
+ if (this.service.isLoggedIn) {
+ lazy.Status.sync = LOGIN_FAILED_NETWORK_ERROR;
+ } else {
+ lazy.Status.login = LOGIN_FAILED_NETWORK_ERROR;
+ }
+ break;
+ }
+ },
+};
diff --git a/services/sync/modules/record.sys.mjs b/services/sync/modules/record.sys.mjs
new file mode 100644
index 0000000000..7d5918a8ca
--- /dev/null
+++ b/services/sync/modules/record.sys.mjs
@@ -0,0 +1,1335 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const CRYPTO_COLLECTION = "crypto";
+const KEYS_WBO = "keys";
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import {
+ DEFAULT_DOWNLOAD_BATCH_SIZE,
+ DEFAULT_KEYBUNDLE_NAME,
+} from "resource://services-sync/constants.sys.mjs";
+import { BulkKeyBundle } from "resource://services-sync/keys.sys.mjs";
+import { Weave } from "resource://services-sync/main.sys.mjs";
+import { Resource } from "resource://services-sync/resource.sys.mjs";
+import { Utils } from "resource://services-sync/util.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+import { CryptoUtils } from "resource://services-crypto/utils.sys.mjs";
+
+/**
+ * The base class for all Sync basic storage objects (BSOs). This is the format
+ * used to store all records on the Sync server. In an earlier version of the
+ * Sync protocol, BSOs were called WBOs, or Weave Basic Objects. This
+ * class retains the old name.
+ *
+ * @class
+ * @param {String} collection The collection name for this BSO.
+ * @param {String} id The ID of this BSO.
+ */
+export function WBORecord(collection, id) {
+ this.data = {};
+ this.payload = {};
+ this.collection = collection; // Optional.
+ this.id = id; // Optional.
+}
+
+WBORecord.prototype = {
+ _logName: "Sync.Record.WBO",
+
+ get sortindex() {
+ if (this.data.sortindex) {
+ return this.data.sortindex;
+ }
+ return 0;
+ },
+
+ // Get thyself from your URI, then deserialize.
+ // Set thine 'response' field.
+ async fetch(resource) {
+ if (!(resource instanceof Resource)) {
+ throw new Error("First argument must be a Resource instance.");
+ }
+
+ let r = await resource.get();
+ if (r.success) {
+ this.deserialize(r.obj); // Warning! Muffles exceptions!
+ }
+ this.response = r;
+ return this;
+ },
+
+ upload(resource) {
+ if (!(resource instanceof Resource)) {
+ throw new Error("First argument must be a Resource instance.");
+ }
+
+ return resource.put(this);
+ },
+
+ // Take a base URI string, with trailing slash, and return the URI of this
+ // WBO based on collection and ID.
+ uri(base) {
+ if (this.collection && this.id) {
+ let url = CommonUtils.makeURI(base + this.collection + "/" + this.id);
+ url.QueryInterface(Ci.nsIURL);
+ return url;
+ }
+ return null;
+ },
+
+ deserialize: function deserialize(json) {
+ if (!json || typeof json !== "object") {
+ throw new TypeError("Can't deserialize record from: " + json);
+ }
+ this.data = json;
+ try {
+ // The payload is likely to be JSON, but if not, keep it as a string
+ this.payload = JSON.parse(this.payload);
+ } catch (ex) {}
+ },
+
+ toJSON: function toJSON() {
+ // Copy fields from data to be stringified, making sure payload is a string
+ let obj = {};
+ for (let [key, val] of Object.entries(this.data)) {
+ obj[key] = key == "payload" ? JSON.stringify(val) : val;
+ }
+ if (this.ttl) {
+ obj.ttl = this.ttl;
+ }
+ return obj;
+ },
+
+ toString: function toString() {
+ return (
+ "{ " +
+ "id: " +
+ this.id +
+ " " +
+ "index: " +
+ this.sortindex +
+ " " +
+ "modified: " +
+ this.modified +
+ " " +
+ "ttl: " +
+ this.ttl +
+ " " +
+ "payload: " +
+ JSON.stringify(this.payload) +
+ " }"
+ );
+ },
+};
+
+Utils.deferGetSet(WBORecord, "data", [
+ "id",
+ "modified",
+ "sortindex",
+ "payload",
+]);
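+
+// For illustration (not used at runtime): on the wire, a BSO is a JSON object
+// whose `payload` member is itself a JSON *string*. A hypothetical record
+// produced by toJSON() might look like:
+//
+//   {
+//     "id": "record-guid",
+//     "sortindex": 0,
+//     "payload": "{\"ciphertext\":\"...\",\"IV\":\"...\",\"hmac\":\"...\"}"
+//   }
+//
+// deserialize() reverses this, parsing `payload` back into an object when it
+// is valid JSON.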
+
+/**
+ * An encrypted BSO record. This subclass handles encrypting and decrypting the
+ * BSO payload, but doesn't parse or interpret the cleartext string. Subclasses
+ * must override `transformBeforeEncrypt` and `transformAfterDecrypt` to process
+ * the cleartext.
+ *
+ * This class is only exposed for bridged engines, which handle serialization
+ * and deserialization in Rust. Sync engines implemented in JS should subclass
+ * `CryptoWrapper` instead, which takes care of transforming the cleartext into
+ * an object, and ensuring its contents are valid.
+ *
+ * @class
+ * @template Cleartext
+ * @param {String} collection The collection name for this BSO.
+ * @param {String} id The ID of this BSO.
+ */
+export function RawCryptoWrapper(collection, id) {
+ // Setting properties before calling the superclass constructor isn't allowed
+ // in new-style classes (`class MyRecord extends RawCryptoWrapper`), but
+ // allowed with plain functions. This is also why `defaultCleartext` is a
+ // method, and not simply set in the subclass constructor.
+ this.cleartext = this.defaultCleartext();
+ WBORecord.call(this, collection, id);
+ this.ciphertext = null;
+}
+
+RawCryptoWrapper.prototype = {
+ _logName: "Sync.Record.RawCryptoWrapper",
+
+ /**
+ * Returns the default empty cleartext for this record type. This is exposed
+ * as a method so that subclasses can override it, and access the default
+ * cleartext in their constructors. `CryptoWrapper`, for example, overrides
+ * this to return an empty object, so that initializing the `id` in its
+ * constructor calls its overridden `id` setter.
+ *
+ * @returns {Cleartext} An empty cleartext.
+ */
+ defaultCleartext() {
+ return null;
+ },
+
+ /**
+ * Transforms the cleartext into a string that can be encrypted and wrapped
+ * in a BSO payload. This is called before uploading the record to the server.
+ *
+ * @param {Cleartext} outgoingCleartext The cleartext to upload.
+ * @returns {String} The serialized cleartext.
+ */
+ transformBeforeEncrypt(outgoingCleartext) {
+ throw new TypeError("Override to stringify outgoing records");
+ },
+
+ /**
+ * Transforms an incoming cleartext string into an instance of the
+ * `Cleartext` type. This is called when fetching the record from the
+ * server.
+ *
+ * @param {String} incomingCleartext The decrypted cleartext string.
+ * @returns {Cleartext} The parsed cleartext.
+ */
+ transformAfterDecrypt(incomingCleartext) {
+ throw new TypeError("Override to parse incoming records");
+ },
+
+ ciphertextHMAC: async function ciphertextHMAC(keyBundle) {
+ let hmacKeyByteString = keyBundle.hmacKey;
+ if (!hmacKeyByteString) {
+ throw new Error("Cannot compute HMAC without an HMAC key.");
+ }
+ let hmacKey = CommonUtils.byteStringToArrayBuffer(hmacKeyByteString);
+ // NB: this.ciphertext is a base64-encoded string. For some reason this
+ // implementation computes the HMAC on the encoded value.
+ let data = CommonUtils.byteStringToArrayBuffer(this.ciphertext);
+ let hmac = await CryptoUtils.hmac("SHA-256", hmacKey, data);
+ return CommonUtils.bytesAsHex(CommonUtils.arrayBufferToByteString(hmac));
+ },
+
+ /*
+ * Don't directly use the sync key. Instead, grab a key for this
+ * collection, which is decrypted with the sync key.
+ *
+ * Cache those keys; invalidate the cache if the time on the keys collection
+ * changes, or other auth events occur.
+ *
+ * Optional key bundle overrides the collection key lookup.
+ */
+ async encrypt(keyBundle) {
+ if (!keyBundle) {
+ throw new Error("A key bundle must be supplied to encrypt.");
+ }
+
+ this.IV = Weave.Crypto.generateRandomIV();
+ this.ciphertext = await Weave.Crypto.encrypt(
+ this.transformBeforeEncrypt(this.cleartext),
+ keyBundle.encryptionKeyB64,
+ this.IV
+ );
+ this.hmac = await this.ciphertextHMAC(keyBundle);
+ this.cleartext = null;
+ },
+
+ // Optional key bundle.
+ async decrypt(keyBundle) {
+ if (!this.ciphertext) {
+ throw new Error("No ciphertext: nothing to decrypt?");
+ }
+
+ if (!keyBundle) {
+ throw new Error("A key bundle must be supplied to decrypt.");
+ }
+
+ // Authenticate the encrypted blob with the expected HMAC
+ let computedHMAC = await this.ciphertextHMAC(keyBundle);
+
+ if (computedHMAC != this.hmac) {
+ Utils.throwHMACMismatch(this.hmac, computedHMAC);
+ }
+
+ let cleartext = await Weave.Crypto.decrypt(
+ this.ciphertext,
+ keyBundle.encryptionKeyB64,
+ this.IV
+ );
+ this.cleartext = this.transformAfterDecrypt(cleartext);
+ this.ciphertext = null;
+
+ return this.cleartext;
+ },
+};
+
+Object.setPrototypeOf(RawCryptoWrapper.prototype, WBORecord.prototype);
+
+Utils.deferGetSet(RawCryptoWrapper, "payload", ["ciphertext", "IV", "hmac"]);
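+
+// A minimal sketch of a RawCryptoWrapper subclass, for illustration only
+// (`ExampleRawRecord` is an invented name; real bridged-engine records are
+// defined by their engines):
+//
+//   function ExampleRawRecord(collection, id) {
+//     RawCryptoWrapper.call(this, collection, id);
+//   }
+//   ExampleRawRecord.prototype = {
+//     transformBeforeEncrypt(cleartext) {
+//       // `cleartext` is whatever opaque value the engine stores.
+//       return JSON.stringify(cleartext);
+//     },
+//     transformAfterDecrypt(incoming) {
+//       return JSON.parse(incoming);
+//     },
+//   };
+//   Object.setPrototypeOf(
+//     ExampleRawRecord.prototype,
+//     RawCryptoWrapper.prototype
+//   );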
+
+/**
+ * An encrypted BSO record with a JSON payload. All engines implemented in JS
+ * should subclass this class to describe their own record types.
+ *
+ * @class
+ * @param {String} collection The collection name for this BSO.
+ * @param {String} id The ID of this BSO.
+ */
+export function CryptoWrapper(collection, id) {
+ RawCryptoWrapper.call(this, collection, id);
+}
+
+CryptoWrapper.prototype = {
+ _logName: "Sync.Record.CryptoWrapper",
+
+ defaultCleartext() {
+ return {};
+ },
+
+ transformBeforeEncrypt(cleartext) {
+ return JSON.stringify(cleartext);
+ },
+
+ transformAfterDecrypt(cleartext) {
+ // Handle invalid data here. Elsewhere we assume that cleartext is an object.
+ let json_result = JSON.parse(cleartext);
+
+ if (!(json_result && json_result instanceof Object)) {
+ throw new Error(
+ `Decryption failed: result is <${json_result}>, not an object.`
+ );
+ }
+
+ // If the payload has an encrypted id ensure it matches the requested record's id.
+ if (json_result.id && json_result.id != this.id) {
+ throw new Error(`Record id mismatch: ${json_result.id} != ${this.id}`);
+ }
+
+ return json_result;
+ },
+
+ cleartextToString() {
+ return JSON.stringify(this.cleartext);
+ },
+
+ toString: function toString() {
+ let payload = this.deleted ? "DELETED" : this.cleartextToString();
+
+ return (
+ "{ " +
+ "id: " +
+ this.id +
+ " " +
+ "index: " +
+ this.sortindex +
+ " " +
+ "modified: " +
+ this.modified +
+ " " +
+ "ttl: " +
+ this.ttl +
+ " " +
+ "payload: " +
+ payload +
+ " " +
+ "collection: " +
+ (this.collection || "undefined") +
+ " }"
+ );
+ },
+
+ // The custom setter below masks the parent's getter, so explicitly call it :(
+ get id() {
+ return super.id;
+ },
+
+ // Keep both plaintext and encrypted versions of the id to verify integrity
+ set id(val) {
+ super.id = val;
+ this.cleartext.id = val;
+ },
+};
+
+Object.setPrototypeOf(CryptoWrapper.prototype, RawCryptoWrapper.prototype);
+
+Utils.deferGetSet(CryptoWrapper, "cleartext", "deleted");
+
+/**
+ * An interface and caching layer for records.
+ */
+export function RecordManager(service) {
+ this.service = service;
+
+ this._log = Log.repository.getLogger(this._logName);
+ this._records = {};
+}
+
+RecordManager.prototype = {
+ _recordType: CryptoWrapper,
+ _logName: "Sync.RecordManager",
+
+ async import(url) {
+ this._log.trace("Importing record: " + (url.spec ? url.spec : url));
+ try {
+ // Clear out the last response with empty object if GET fails
+ this.response = {};
+ this.response = await this.service.resource(url).get();
+
+ // Don't parse and save the record on failure
+ if (!this.response.success) {
+ return null;
+ }
+
+ let record = new this._recordType(url);
+ record.deserialize(this.response.obj);
+
+ return this.set(url, record);
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.debug("Failed to import record", ex);
+ return null;
+ }
+ },
+
+ get(url) {
+ // Use a url string as the key to the hash
+ let spec = url.spec ? url.spec : url;
+ if (spec in this._records) {
+ return Promise.resolve(this._records[spec]);
+ }
+ return this.import(url);
+ },
+
+ set: function RecordMgr_set(url, record) {
+ let spec = url.spec ? url.spec : url;
+ return (this._records[spec] = record);
+ },
+
+ contains: function RecordMgr_contains(url) {
+ if ((url.spec || url) in this._records) {
+ return true;
+ }
+ return false;
+ },
+
+ clearCache: function recordMgr_clearCache() {
+ this._records = {};
+ },
+
+ del: function RecordMgr_del(url) {
+ delete this._records[url];
+ },
+};
+
+/**
+ * Keeps track of mappings between collection names ('tabs') and KeyBundles.
+ *
+ * You can update this thing simply by giving it /info/collections. It'll
+ * use the last modified time to bring itself up to date.
+ */
+export function CollectionKeyManager(lastModified, default_, collections) {
+ this.lastModified = lastModified || 0;
+ this._default = default_ || null;
+ this._collections = collections || {};
+
+ this._log = Log.repository.getLogger("Sync.CollectionKeyManager");
+}
+
+// TODO: persist this locally as an Identity. Bug 610913.
+// Note that the last modified time needs to be preserved.
+CollectionKeyManager.prototype = {
+ /**
+ * Generate a new CollectionKeyManager that has the same attributes
+ * as this one.
+ */
+ clone() {
+ const newCollections = {};
+ for (let c in this._collections) {
+ newCollections[c] = this._collections[c];
+ }
+
+ return new CollectionKeyManager(
+ this.lastModified,
+ this._default,
+ newCollections
+ );
+ },
+
+ // Return information about old vs new keys:
+ // * same: true if two collections are equal
+ // * changed: an array of collection names that changed.
+ _compareKeyBundleCollections: function _compareKeyBundleCollections(m1, m2) {
+ let changed = [];
+
+ function process(m1, m2) {
+ for (let k1 in m1) {
+ let v1 = m1[k1];
+ let v2 = m2[k1];
+ if (!(v1 && v2 && v1.equals(v2))) {
+ changed.push(k1);
+ }
+ }
+ }
+
+ // Diffs both ways.
+ process(m1, m2);
+ process(m2, m1);
+
+ // Return a sorted, unique array.
+ changed.sort();
+ let last;
+ changed = changed.filter(x => x != last && (last = x));
+ return { same: !changed.length, changed };
+ },
+
+ get isClear() {
+ return !this._default;
+ },
+
+ clear: function clear() {
+ this._log.info("Clearing collection keys...");
+ this.lastModified = 0;
+ this._collections = {};
+ this._default = null;
+ },
+
+ keyForCollection(collection) {
+ if (collection && this._collections[collection]) {
+ return this._collections[collection];
+ }
+
+ return this._default;
+ },
+
+ /**
+   * Create a crypto/keys WBO wrapping the given map of collection names to
+   * key bundles, plus the given default key bundle. Key generation itself
+   * happens in `newKeys`; this method only serializes existing bundles.
+ */
+ _makeWBO(collections, defaultBundle) {
+ let wbo = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
+ let c = {};
+ for (let k in collections) {
+ c[k] = collections[k].keyPairB64;
+ }
+ wbo.cleartext = {
+ default: defaultBundle ? defaultBundle.keyPairB64 : null,
+ collections: c,
+ collection: CRYPTO_COLLECTION,
+ id: KEYS_WBO,
+ };
+ return wbo;
+ },
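+
+  // For illustration (values invented): the cleartext of the crypto/keys WBO
+  // built above has this shape, where each value is an [encryption, hmac]
+  // pair of base64 key strings:
+  //
+  //   {
+  //     default: ["<enc key b64>", "<hmac key b64>"],
+  //     collections: { tabs: ["<enc key b64>", "<hmac key b64>"] },
+  //     collection: "crypto",
+  //     id: "keys",
+  //   }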
+
+ /**
+ * Create a WBO for the current keys.
+ */
+ asWBO(collection, id) {
+ return this._makeWBO(this._collections, this._default);
+ },
+
+ /**
+ * Compute a new default key, and new keys for any specified collections.
+ */
+ async newKeys(collections) {
+ let newDefaultKeyBundle = await this.newDefaultKeyBundle();
+
+ let newColls = {};
+ if (collections) {
+ for (let c of collections) {
+ let b = new BulkKeyBundle(c);
+ await b.generateRandom();
+ newColls[c] = b;
+ }
+ }
+ return [newDefaultKeyBundle, newColls];
+ },
+
+ /**
+ * Generates new keys, but does not replace our local copy. Use this to
+ * verify an upload before storing.
+ */
+ async generateNewKeysWBO(collections) {
+ let newDefaultKey, newColls;
+ [newDefaultKey, newColls] = await this.newKeys(collections);
+
+ return this._makeWBO(newColls, newDefaultKey);
+ },
+
+ /**
+ * Create a new default key.
+ *
+ * @returns {BulkKeyBundle}
+ */
+ async newDefaultKeyBundle() {
+ const key = new BulkKeyBundle(DEFAULT_KEYBUNDLE_NAME);
+ await key.generateRandom();
+ return key;
+ },
+
+ /**
+   * Create a new default key and store it as this._default, since without
+   * one you cannot use setContents.
+ */
+ async generateDefaultKey() {
+ this._default = await this.newDefaultKeyBundle();
+ },
+
+ /**
+ * Return true if keys are already present for each of the given
+ * collections.
+ */
+ hasKeysFor(collections) {
+ // We can't use filter() here because sometimes collections is an iterator.
+ for (let collection of collections) {
+ if (!this._collections[collection]) {
+ return false;
+ }
+ }
+ return true;
+ },
+
+ /**
+ * Return a new CollectionKeyManager that has keys for each of the
+ * given collections (creating new ones for collections where we
+ * don't already have keys).
+ */
+ async ensureKeysFor(collections) {
+ const newKeys = Object.assign({}, this._collections);
+ for (let c of collections) {
+ if (newKeys[c]) {
+ continue; // don't replace existing keys
+ }
+
+ const b = new BulkKeyBundle(c);
+ await b.generateRandom();
+ newKeys[c] = b;
+ }
+ return new CollectionKeyManager(this.lastModified, this._default, newKeys);
+ },
+
+ // Take the fetched info/collections WBO, checking the change
+ // time of the crypto collection.
+ updateNeeded(info_collections) {
+ this._log.info(
+ "Testing for updateNeeded. Last modified: " + this.lastModified
+ );
+
+ // No local record of modification time? Need an update.
+ if (!this.lastModified) {
+ return true;
+ }
+
+ // No keys on the server? We need an update, though our
+ // update handling will be a little more drastic...
+ if (!(CRYPTO_COLLECTION in info_collections)) {
+ return true;
+ }
+
+ // Otherwise, we need an update if our modification time is stale.
+ return info_collections[CRYPTO_COLLECTION] > this.lastModified;
+ },
+
+ //
+ // Set our keys and modified time to the values fetched from the server.
+ // Returns one of three values:
+ //
+ // * If the default key was modified, return true.
+ // * If the default key was not modified, but per-collection keys were,
+ // return an array of such.
+ // * Otherwise, return false -- we were up-to-date.
+ //
+ setContents: function setContents(payload, modified) {
+ let self = this;
+
+ this._log.info(
+ "Setting collection keys contents. Our last modified: " +
+ this.lastModified +
+ ", input modified: " +
+ modified +
+ "."
+ );
+
+ if (!payload) {
+ throw new Error("No payload in CollectionKeyManager.setContents().");
+ }
+
+ if (!payload.default) {
+ this._log.warn("No downloaded default key: this should not occur.");
+ this._log.warn("Not clearing local keys.");
+ throw new Error(
+ "No default key in CollectionKeyManager.setContents(). Cannot proceed."
+ );
+ }
+
+ // Process the incoming default key.
+ let b = new BulkKeyBundle(DEFAULT_KEYBUNDLE_NAME);
+ b.keyPairB64 = payload.default;
+ let newDefault = b;
+
+ // Process the incoming collections.
+ let newCollections = {};
+ if ("collections" in payload) {
+ this._log.info("Processing downloaded per-collection keys.");
+ let colls = payload.collections;
+ for (let k in colls) {
+ let v = colls[k];
+ if (v) {
+ let keyObj = new BulkKeyBundle(k);
+ keyObj.keyPairB64 = v;
+ newCollections[k] = keyObj;
+ }
+ }
+ }
+
+ // Check to see if these are already our keys.
+ let sameDefault = this._default && this._default.equals(newDefault);
+ let collComparison = this._compareKeyBundleCollections(
+ newCollections,
+ this._collections
+ );
+ let sameColls = collComparison.same;
+
+ if (sameDefault && sameColls) {
+ self._log.info("New keys are the same as our old keys!");
+ if (modified) {
+ self._log.info("Bumped local modified time.");
+ self.lastModified = modified;
+ }
+ return false;
+ }
+
+ // Make sure things are nice and tidy before we set.
+ this.clear();
+
+ this._log.info("Saving downloaded keys.");
+ this._default = newDefault;
+ this._collections = newCollections;
+
+ // Always trust the server.
+ if (modified) {
+ self._log.info("Bumping last modified to " + modified);
+ self.lastModified = modified;
+ }
+
+ return sameDefault ? collComparison.changed : true;
+ },
+
+ async updateContents(syncKeyBundle, storage_keys) {
+ let log = this._log;
+ log.info("Updating collection keys...");
+
+ // storage_keys is a WBO, fetched from storage/crypto/keys.
+ // Its payload is the default key, and a map of collections to keys.
+ // We lazily compute the key objects from the strings we're given.
+
+ let payload;
+ try {
+ payload = await storage_keys.decrypt(syncKeyBundle);
+ } catch (ex) {
+ log.warn("Got exception decrypting storage keys with sync key.", ex);
+ log.info("Aborting updateContents. Rethrowing.");
+ throw ex;
+ }
+
+ let r = this.setContents(payload, storage_keys.modified);
+ log.info("Collection keys updated.");
+ return r;
+ },
+};
+
+export function Collection(uri, recordObj, service) {
+ if (!service) {
+ throw new Error("Collection constructor requires a service.");
+ }
+
+ Resource.call(this, uri);
+
+ // This is a bit hacky, but gets the job done.
+ let res = service.resource(uri);
+ this.authenticator = res.authenticator;
+
+ this._recordObj = recordObj;
+ this._service = service;
+
+ this._full = false;
+ this._ids = null;
+ this._limit = 0;
+ this._older = 0;
+ this._newer = 0;
+ this._data = [];
+ // optional members used by batch upload operations.
+ this._batch = null;
+ this._commit = false;
+ // Used for batch download operations -- note that this is explicitly an
+ // opaque value and not (necessarily) a number.
+ this._offset = null;
+}
+
+Collection.prototype = {
+ _logName: "Sync.Collection",
+
+ _rebuildURL: function Coll__rebuildURL() {
+ // XXX should consider what happens if it's not a URL...
+ this.uri.QueryInterface(Ci.nsIURL);
+
+ let args = [];
+ if (this.older) {
+ args.push("older=" + this.older);
+ }
+ if (this.newer) {
+ args.push("newer=" + this.newer);
+ }
+ if (this.full) {
+ args.push("full=1");
+ }
+ if (this.sort) {
+ args.push("sort=" + this.sort);
+ }
+ if (this.ids != null) {
+ args.push("ids=" + this.ids);
+ }
+ if (this.limit > 0 && this.limit != Infinity) {
+ args.push("limit=" + this.limit);
+ }
+ if (this._batch) {
+ args.push("batch=" + encodeURIComponent(this._batch));
+ }
+ if (this._commit) {
+ args.push("commit=true");
+ }
+ if (this._offset) {
+ args.push("offset=" + encodeURIComponent(this._offset));
+ }
+
+ this.uri = this.uri
+ .mutate()
+ .setQuery(args.length ? "?" + args.join("&") : "")
+ .finalize();
+ },
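+
+  // For example (illustrative values only), a collection with `full = true`,
+  // `newer = 1234567890.12` and `limit = 100` rebuilds its URI query to
+  // something like:
+  //
+  //   .../storage/history?newer=1234567890.12&full=1&limit=100
+  //
+  // All of the setters below funnel through _rebuildURL, so the query string
+  // always reflects the current state.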
+
+ // get full items
+ get full() {
+ return this._full;
+ },
+ set full(value) {
+ this._full = value;
+ this._rebuildURL();
+ },
+
+ // Apply the action to a certain set of ids
+ get ids() {
+ return this._ids;
+ },
+ set ids(value) {
+ this._ids = value;
+ this._rebuildURL();
+ },
+
+ // Limit how many records to get
+ get limit() {
+ return this._limit;
+ },
+ set limit(value) {
+ this._limit = value;
+ this._rebuildURL();
+ },
+
+ // get only items modified before some date
+ get older() {
+ return this._older;
+ },
+ set older(value) {
+ this._older = value;
+ this._rebuildURL();
+ },
+
+ // get only items modified since some date
+ get newer() {
+ return this._newer;
+ },
+ set newer(value) {
+ this._newer = value;
+ this._rebuildURL();
+ },
+
+ // get items sorted by some criteria. valid values:
+ // oldest (oldest first)
+ // newest (newest first)
+ // index
+ get sort() {
+ return this._sort;
+ },
+ set sort(value) {
+ if (value && value != "oldest" && value != "newest" && value != "index") {
+ throw new TypeError(
+ `Illegal value for sort: "${value}" (should be "oldest", "newest", or "index").`
+ );
+ }
+ this._sort = value;
+ this._rebuildURL();
+ },
+
+ get offset() {
+ return this._offset;
+ },
+ set offset(value) {
+ this._offset = value;
+ this._rebuildURL();
+ },
+
+ // Set information about the batch for this request.
+ get batch() {
+ return this._batch;
+ },
+ set batch(value) {
+ this._batch = value;
+ this._rebuildURL();
+ },
+
+ get commit() {
+ return this._commit;
+ },
+ set commit(value) {
+ this._commit = value && true;
+ this._rebuildURL();
+ },
+
+  // Similar to get(), but will page through the items `batchSize` at a time,
+  // buffering the downloaded records until we've gotten them all.
+  //
+  // Returns the last response processed together with the buffered records;
+  // the records array is empty if a non-success status is received while
+  // downloading (or if a network error occurs).
+ async getBatched(batchSize = DEFAULT_DOWNLOAD_BATCH_SIZE) {
+ let totalLimit = Number(this.limit) || Infinity;
+ if (batchSize <= 0 || batchSize >= totalLimit) {
+ throw new Error("Invalid batch size");
+ }
+
+ if (!this.full) {
+ throw new Error("getBatched is unimplemented for guid-only GETs");
+ }
+
+ // _onComplete and _onProgress are reset after each `get` by Resource.
+ let { _onComplete, _onProgress } = this;
+ let recordBuffer = [];
+ let resp;
+ try {
+ let lastModifiedTime;
+ this.limit = batchSize;
+
+ do {
+ this._onProgress = _onProgress;
+ this._onComplete = _onComplete;
+ if (batchSize + recordBuffer.length > totalLimit) {
+ this.limit = totalLimit - recordBuffer.length;
+ }
+ this._log.trace("Performing batched GET", {
+ limit: this.limit,
+ offset: this.offset,
+ });
+ // Actually perform the request
+ resp = await this.get();
+ if (!resp.success) {
+ recordBuffer = [];
+ break;
+ }
+ for (let json of resp.obj) {
+ let record = new this._recordObj();
+ record.deserialize(json);
+ recordBuffer.push(record);
+ }
+
+ // Initialize last modified, or check that something broken isn't happening.
+ let lastModified = resp.headers["x-last-modified"];
+ if (!lastModifiedTime) {
+ lastModifiedTime = lastModified;
+ this.setHeader("X-If-Unmodified-Since", lastModified);
+ } else if (lastModified != lastModifiedTime) {
+ // Should be impossible -- We'd get a 412 in this case.
+ throw new Error(
+ "X-Last-Modified changed in the middle of a download batch! " +
+ `${lastModified} => ${lastModifiedTime}`
+ );
+ }
+
+ // If this is missing, we're finished.
+ this.offset = resp.headers["x-weave-next-offset"];
+ } while (this.offset && totalLimit > recordBuffer.length);
+ } finally {
+ // Ensure we undo any temporary state so that subsequent calls to get()
+ // or getBatched() work properly. We do this before calling the record
+ // handler so that we can more convincingly pretend to be a normal get()
+ // call. Note: we're resetting these to the values they had before this
+ // function was called.
+ this._limit = totalLimit;
+ this._offset = null;
+ delete this._headers["x-if-unmodified-since"];
+ this._rebuildURL();
+ }
+ return { response: resp, records: recordBuffer };
+ },
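+
+  // Hypothetical usage sketch (record type and variable names invented):
+  // download a full collection in pages of 100 records, processing them only
+  // once the whole download has succeeded:
+  //
+  //   let coll = new Collection(url, SomeRecordType, service);
+  //   coll.full = true;
+  //   let { response, records } = await coll.getBatched(100);
+  //   if (response.success) {
+  //     for (let record of records) {
+  //       // decrypt and apply each record
+  //     }
+  //   }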
+
+ // This object only supports posting via the postQueue object.
+ post() {
+ throw new Error(
+ "Don't directly post to a collection - use newPostQueue instead"
+ );
+ },
+
+ newPostQueue(log, timestamp, postCallback) {
+ let poster = (data, headers, batch, commit) => {
+ this.batch = batch;
+ this.commit = commit;
+ for (let [header, value] of headers) {
+ this.setHeader(header, value);
+ }
+ return Resource.prototype.post.call(this, data);
+ };
+ return new PostQueue(
+ poster,
+ timestamp,
+ this._service.serverConfiguration || {},
+ log,
+ postCallback
+ );
+ },
+};
+
+Object.setPrototypeOf(Collection.prototype, Resource.prototype);
+
+// These are limits for requests provided by the server at the
+// info/configuration endpoint -- server documentation is available here:
+// http://moz-services-docs.readthedocs.io/en/latest/storage/apis-1.5.html#api-instructions
+//
+// All are optional; however, we synthesize (non-infinite) default values for
+// the "max_request_bytes" and "max_record_payload_bytes" options. For the
+// others, we treat a missing limit as infinite.
+//
+// These are also the only ones that all servers (even batching-disabled
+// servers) should support, at least once this sync-serverstorage patch is
+// everywhere https://github.com/mozilla-services/server-syncstorage/pull/74
+//
+// Batching-enabled servers also limit the amount of payload data and the
+// number of records we can send in a single post, as well as in the whole
+// batch. Note that the byte limits here are just with respect to the
+// *payload* data, i.e. the data appearing in the payload property (a
+// string) of the object.
+//
+// Note that in practice, these limits should be sensible, but the code makes
+// no assumptions about this. If we hit any of the limits, we perform the
+// corresponding action (e.g. submit a request, possibly committing the
+// current batch).
+const DefaultPostQueueConfig = Object.freeze({
+ // Number of total bytes allowed in a request
+ max_request_bytes: 260 * 1024,
+
+ // Maximum number of bytes allowed in the "payload" property of a record.
+ max_record_payload_bytes: 256 * 1024,
+
+ // The limit for how many bytes worth of data appearing in "payload"
+ // properties are allowed in a single post.
+ max_post_bytes: Infinity,
+
+ // The limit for the number of records allowed in a single post.
+ max_post_records: Infinity,
+
+ // The limit for how many bytes worth of data appearing in "payload"
+ // properties are allowed in a batch. (Same as max_post_bytes, but for
+ // batches).
+ max_total_bytes: Infinity,
+
+ // The limit for the number of records allowed in a single post. (Same
+ // as max_post_records, but for batches).
+ max_total_records: Infinity,
+});
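+
+// For illustration (numbers invented): a batching-enabled server might return
+// an info/configuration document like the following, which PostQueue merges
+// over DefaultPostQueueConfig:
+//
+//   {
+//     "max_request_bytes": 1048576,
+//     "max_record_payload_bytes": 262144,
+//     "max_post_bytes": 1048576,
+//     "max_post_records": 100,
+//     "max_total_bytes": 104857600,
+//     "max_total_records": 10000
+//   }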
+
+// Manages a pair of (byte, count) limits for a PostQueue, such as
+// (max_post_bytes, max_post_records) or (max_total_bytes, max_total_records).
+class LimitTracker {
+ constructor(maxBytes, maxRecords) {
+ this.maxBytes = maxBytes;
+ this.maxRecords = maxRecords;
+ this.curBytes = 0;
+ this.curRecords = 0;
+ }
+
+ clear() {
+ this.curBytes = 0;
+ this.curRecords = 0;
+ }
+
+ canAddRecord(payloadSize) {
+ // The record counts are inclusive, but depending on the version of the
+ // server, the byte counts may or may not be inclusive (See
+ // https://github.com/mozilla-services/server-syncstorage/issues/73).
+ return (
+ this.curRecords + 1 <= this.maxRecords &&
+ this.curBytes + payloadSize < this.maxBytes
+ );
+ }
+
+ canNeverAdd(recordSize) {
+ return recordSize >= this.maxBytes;
+ }
+
+ didAddRecord(recordSize) {
+ if (!this.canAddRecord(recordSize)) {
+ // This is a bug, caller is expected to call canAddRecord first.
+ throw new Error(
+ "LimitTracker.canAddRecord must be checked before adding record"
+ );
+ }
+ this.curRecords += 1;
+ this.curBytes += recordSize;
+ }
+}
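+
+// Worked example (illustrative numbers): with maxBytes = 100 and
+// maxRecords = 2, a tracker that has already counted one 40-byte record can
+// add a 50-byte record (2 <= 2 and 90 < 100) but not a 60-byte one, because
+// the byte check is strict: 40 + 60 = 100, and 100 < 100 is false.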
+
+/* A helper to manage the posting of records while respecting the various
+ size limits.
+
+ This supports the concept of a server-side "batch". The general idea is:
+ * We queue as many records as allowed in memory, then make a single POST.
+ * This first POST (optionally) gives us a batch ID, which we use for
+ all subsequent posts, until...
+ * At some point we hit a batch-maximum, and jump through a few hoops to
+ commit the current batch (ie, all previous POSTs) and start a new one.
+ * Eventually commit the final batch.
+
+ In most cases we expect there to be exactly 1 batch consisting of possibly
+ multiple POSTs.
+*/
+export function PostQueue(poster, timestamp, serverConfig, log, postCallback) {
+ // The "post" function we should use when it comes time to do the post.
+ this.poster = poster;
+ this.log = log;
+
+ let config = Object.assign({}, DefaultPostQueueConfig, serverConfig);
+
+ if (!serverConfig.max_request_bytes && serverConfig.max_post_bytes) {
+ // Use max_post_bytes for max_request_bytes if it's missing. Only needed
+ // until server-syncstorage/pull/74 is everywhere, and even then it's
+    // unnecessary if the server limits are configured sanely (there's no
+    // guarantee of that -- at least before the patch is fully deployed).
+ config.max_request_bytes = serverConfig.max_post_bytes;
+ }
+
+ this.log.trace("new PostQueue config (after defaults): ", config);
+
+ // The callback we make with the response when we do get around to making the
+  // post (which could be during any of the enqueue() calls or the final
+  // flush()). This callback may be called multiple times and must not add new
+  // items to the queue. It is invoked as postCallback(postQueue, response,
+  // batchInProgress); the final boolean argument is true
+ // if we're in the middle of a batch, and false if either the batch is
+ // complete, or it's a post to a server that does not understand batching.
+ this.postCallback = postCallback;
+
+ // Tracks the count and combined payload size for the records we've queued
+ // so far but are yet to POST.
+ this.postLimits = new LimitTracker(
+ config.max_post_bytes,
+ config.max_post_records
+ );
+
+ // As above, but for the batch size.
+ this.batchLimits = new LimitTracker(
+ config.max_total_bytes,
+ config.max_total_records
+ );
+
+ // Limit for the size of `this.queued` before we do a post.
+ this.maxRequestBytes = config.max_request_bytes;
+
+ // Limit for the size of incoming record payloads.
+ this.maxPayloadBytes = config.max_record_payload_bytes;
+
+ // The string where we are capturing the stringified version of the records
+ // queued so far. It will always be invalid JSON as it is always missing the
+ // closing bracket. It's also used to track whether or not we've gone past
+ // maxRequestBytes.
+ this.queued = "";
+
+ // The ID of our current batch. Can be undefined (meaning we are yet to make
+  // the first post of a batch, so don't know if we have a batch), null (meaning
+ // we've made the first post but the server response indicated no batching
+ // semantics), otherwise we have made the first post and it holds the batch ID
+ // returned from the server.
+ this.batchID = undefined;
+
+ // Time used for X-If-Unmodified-Since -- should be the timestamp from the last GET.
+ this.lastModified = timestamp;
+}
+
+PostQueue.prototype = {
+ async enqueue(record) {
+ // We want to ensure the record has a .toJSON() method defined - even
+ // though JSON.stringify() would implicitly call it, the stringify might
+ // still work even if it isn't defined, which isn't what we want.
+ let jsonRepr = record.toJSON();
+ if (!jsonRepr) {
+ throw new Error(
+ "You must only call this with objects that explicitly support JSON"
+ );
+ }
+
+ let bytes = JSON.stringify(jsonRepr);
+
+ // We use the payload size for the LimitTrackers, since that's what the
+ // byte limits other than max_request_bytes refer to.
+ let payloadLength = jsonRepr.payload.length;
+
+ // The `+ 2` is to account for the 2-byte (maximum) overhead (one byte for
+ // the leading comma or "[", which all records will have, and the other for
+ // the final trailing "]", only present for the last record).
+ let encodedLength = bytes.length + 2;
+
+ // Check first if there's some limit that indicates we cannot ever enqueue
+ // this record.
+ let isTooBig =
+ this.postLimits.canNeverAdd(payloadLength) ||
+ this.batchLimits.canNeverAdd(payloadLength) ||
+ encodedLength >= this.maxRequestBytes ||
+ payloadLength >= this.maxPayloadBytes;
+
+ if (isTooBig) {
+ return {
+ enqueued: false,
+ error: new Error("Single record too large to submit to server"),
+ };
+ }
+
+ let canPostRecord = this.postLimits.canAddRecord(payloadLength);
+ let canBatchRecord = this.batchLimits.canAddRecord(payloadLength);
+ let canSendRecord =
+ this.queued.length + encodedLength < this.maxRequestBytes;
+
+ if (!canPostRecord || !canBatchRecord || !canSendRecord) {
+ this.log.trace("PostQueue flushing: ", {
+ canPostRecord,
+ canSendRecord,
+ canBatchRecord,
+ });
+ // We need to write the queue out before handling this one, but we only
+ // commit the batch (and thus start a new one) if the record couldn't fit
+ // inside the batch.
+ await this.flush(!canBatchRecord);
+ }
+
+ this.postLimits.didAddRecord(payloadLength);
+ this.batchLimits.didAddRecord(payloadLength);
+
+ // Either a ',' or a '[' depending on whether this is the first record.
+ this.queued += this.queued.length ? "," : "[";
+ this.queued += bytes;
+ return { enqueued: true };
+ },
+
+ async flush(finalBatchPost) {
+ if (!this.queued) {
+ // nothing queued - we can't be in a batch, and something has gone very
+ // bad if we think we are.
+ if (this.batchID) {
+ throw new Error(
+ `Flush called when no queued records but we are in a batch ${this.batchID}`
+ );
+ }
+ return;
+ }
+ // the batch query-param and headers we'll send.
+ let batch;
+ let headers = [];
+ if (this.batchID === undefined) {
+ // First commit in a (possible) batch.
+ batch = "true";
+ } else if (this.batchID) {
+ // We have an existing batch.
+ batch = this.batchID;
+ } else {
+ // Not the first post and we know we have no batch semantics.
+ batch = null;
+ }
+
+ headers.push(["x-if-unmodified-since", this.lastModified]);
+
+ let numQueued = this.postLimits.curRecords;
+ this.log.info(
+ `Posting ${numQueued} records of ${
+ this.queued.length + 1
+ } bytes with batch=${batch}`
+ );
+ let queued = this.queued + "]";
+ if (finalBatchPost) {
+ this.batchLimits.clear();
+ }
+ this.postLimits.clear();
+ this.queued = "";
+ let response = await this.poster(
+ queued,
+ headers,
+ batch,
+ !!(finalBatchPost && this.batchID !== null)
+ );
+
+ if (!response.success) {
+ this.log.trace("Server error response during a batch", response);
+ // not clear what we should do here - we expect the consumer of this to
+ // abort by throwing in the postCallback below.
+ await this.postCallback(this, response, !finalBatchPost);
+ return;
+ }
+
+ if (finalBatchPost) {
+ this.log.trace("Committed batch", this.batchID);
+ this.batchID = undefined; // we are now in "first post for the batch" state.
+ this.lastModified = response.headers["x-last-modified"];
+ await this.postCallback(this, response, false);
+ return;
+ }
+
+ if (response.status != 202) {
+ if (this.batchID) {
+ throw new Error(
+ "Server responded non-202 success code while a batch was in progress"
+ );
+ }
+ this.batchID = null; // no batch semantics are in place.
+ this.lastModified = response.headers["x-last-modified"];
+ await this.postCallback(this, response, false);
+ return;
+ }
+
+ // this response is saying the server has batch semantics - we should
+ // always have a batch ID in the response.
+ let responseBatchID = response.obj.batch;
+    this.log.trace("Server responded 202 with batch", responseBatchID);
+ if (!responseBatchID) {
+ this.log.error(
+ "Invalid server response: 202 without a batch ID",
+ response
+ );
+ throw new Error("Invalid server response: 202 without a batch ID");
+ }
+
+ if (this.batchID === undefined) {
+ this.batchID = responseBatchID;
+ if (!this.lastModified) {
+ this.lastModified = response.headers["x-last-modified"];
+ if (!this.lastModified) {
+ throw new Error("Batch response without x-last-modified");
+ }
+ }
+ }
+
+ if (this.batchID != responseBatchID) {
+ throw new Error(
+ `Invalid client/server batch state - client has ${this.batchID}, server has ${responseBatchID}`
+ );
+ }
+
+ await this.postCallback(this, response, true);
+ },
+};
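+
+// Hypothetical usage sketch (names invented): engines obtain a PostQueue via
+// Collection.newPostQueue() and drive it roughly like this:
+//
+//   let queue = collection.newPostQueue(
+//     log,
+//     lastSync,
+//     (postQueue, response, inBatch) => {
+//       if (!response.success) {
+//         throw new Error("Upload failed: " + response.status);
+//       }
+//     }
+//   );
+//   for (let record of outgoingRecords) {
+//     let { enqueued, error } = await queue.enqueue(record);
+//     if (!enqueued) {
+//       log.warn("Skipping record that can never be uploaded", error);
+//     }
+//   }
+//   await queue.flush(true); // commit the final batch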
diff --git a/services/sync/modules/resource.sys.mjs b/services/sync/modules/resource.sys.mjs
new file mode 100644
index 0000000000..537ffd5219
--- /dev/null
+++ b/services/sync/modules/resource.sys.mjs
@@ -0,0 +1,292 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Observers } from "resource://services-common/observers.sys.mjs";
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+import { Utils } from "resource://services-sync/util.sys.mjs";
+import { setTimeout, clearTimeout } from "resource://gre/modules/Timer.sys.mjs";
+
+/* global AbortController */
+
+/*
+ * Resource represents a remote network resource, identified by a URI.
+ * Create an instance like so:
+ *
+ * let resource = new Resource("http://foobar.com/path/to/resource");
+ *
+ * The 'resource' object has the following methods to issue HTTP requests
+ * of the corresponding HTTP methods:
+ *
+ * get(callback)
+ * put(data, callback)
+ * post(data, callback)
+ * delete(callback)
+ */
+export function Resource(uri) {
+ this._log = Log.repository.getLogger(this._logName);
+ this._log.manageLevelFromPref("services.sync.log.logger.network.resources");
+ this.uri = uri;
+ this._headers = {};
+}
+
+// (static) Caches the latest server timestamp (X-Weave-Timestamp header).
+Resource.serverTime = null;
+
+XPCOMUtils.defineLazyPreferenceGetter(
+ Resource,
+ "SEND_VERSION_INFO",
+ "services.sync.sendVersionInfo",
+ true
+);
+Resource.prototype = {
+ _logName: "Sync.Resource",
+
+ /**
+ * Callback to be invoked at request time to add authentication details.
+ * If the callback returns a promise, it will be awaited upon.
+ *
+ * By default, a global authenticator is provided. If this is set, it will
+ * be used instead of the global one.
+ */
+ authenticator: null,
+
+ // Wait 5 minutes before killing a request.
+ ABORT_TIMEOUT: 300000,
+
+ // Headers to be included when making a request for the resource.
+ // Note: Header names should be all lower case, there's no explicit
+ // check for duplicates due to case!
+ get headers() {
+ return this._headers;
+ },
+ set headers(_) {
+ throw new Error("headers can't be mutated directly. Please use setHeader.");
+ },
+ setHeader(header, value) {
+ this._headers[header.toLowerCase()] = value;
+ },
+
+ // URI representing this resource.
+ get uri() {
+ return this._uri;
+ },
+ set uri(value) {
+ if (typeof value == "string") {
+ this._uri = CommonUtils.makeURI(value);
+ } else {
+ this._uri = value;
+ }
+ },
+
+ // Get the string representation of the URI.
+ get spec() {
+ if (this._uri) {
+ return this._uri.spec;
+ }
+ return null;
+ },
+
+ /**
+ * @param {string} method HTTP method
+ * @returns {Headers}
+ */
+ async _buildHeaders(method) {
+ const headers = new Headers(this._headers);
+
+ if (Resource.SEND_VERSION_INFO) {
+ headers.append("user-agent", Utils.userAgent);
+ }
+
+ if (this.authenticator) {
+ const result = await this.authenticator(this, method);
+ if (result && result.headers) {
+ for (const [k, v] of Object.entries(result.headers)) {
+ headers.append(k.toLowerCase(), v);
+ }
+ }
+ } else {
+ this._log.debug("No authenticator found.");
+ }
+
+ // PUT and POST are treated differently because they have payload data.
+ if (("PUT" == method || "POST" == method) && !headers.has("content-type")) {
+ headers.append("content-type", "text/plain");
+ }
+
+ if (this._log.level <= Log.Level.Trace) {
+ for (const [k, v] of headers) {
+ if (k == "authorization" || k == "x-client-state") {
+ this._log.trace(`HTTP Header ${k}: ***** (suppressed)`);
+ } else {
+ this._log.trace(`HTTP Header ${k}: ${v}`);
+ }
+ }
+ }
+
+ if (!headers.has("accept")) {
+ headers.append("accept", "application/json;q=0.9,*/*;q=0.2");
+ }
+
+ return headers;
+ },
+
+ /**
+ * @param {string} method HTTP method
+ * @param {string} data HTTP body
+ * @param {object} signal AbortSignal instance
+ * @returns {Request}
+ */
+ async _createRequest(method, data, signal) {
+ const headers = await this._buildHeaders(method);
+ const init = {
+ cache: "no-store", // No cache.
+ headers,
+ method,
+ signal,
+ mozErrors: true, // Return nsresult error codes instead of a generic
+ // NetworkError when fetch rejects.
+ };
+
+ if (data) {
+ if (!(typeof data == "string" || data instanceof String)) {
+ data = JSON.stringify(data);
+ }
+ this._log.debug(`${method} Length: ${data.length}`);
+ this._log.trace(`${method} Body: ${data}`);
+ init.body = data;
+ }
+ return new Request(this.uri.spec, init);
+ },
+
+ /**
+ * @param {string} method HTTP method
+ * @param {string} [data] HTTP body
+ * @returns {Response}
+ */
+ async _doRequest(method, data = null) {
+ const controller = new AbortController();
+ const request = await this._createRequest(method, data, controller.signal);
+ const responsePromise = fetch(request); // Rejects on network failure.
+ let didTimeout = false;
+ const timeoutId = setTimeout(() => {
+ didTimeout = true;
+ this._log.error(
+ `Request timed out after ${this.ABORT_TIMEOUT}ms. Aborting.`
+ );
+ controller.abort();
+ }, this.ABORT_TIMEOUT);
+ let response;
+ try {
+ response = await responsePromise;
+ } catch (e) {
+ this._log.warn(`${method} request to ${this.uri.spec} failed`, e);
+ if (!didTimeout) {
+ throw e;
+ }
+ throw Components.Exception(
+ "Request aborted (timeout)",
+ Cr.NS_ERROR_NET_TIMEOUT
+ );
+ } finally {
+ clearTimeout(timeoutId);
+ }
+ return this._processResponse(response, method);
+ },
+
+ async _processResponse(response, method) {
+ const data = await response.text();
+ this._logResponse(response, method, data);
+ this._processResponseHeaders(response);
+
+ const ret = {
+ data,
+ url: response.url,
+ status: response.status,
+ success: response.ok,
+ headers: {},
+ };
+ for (const [k, v] of response.headers) {
+ ret.headers[k] = v;
+ }
+
+ // Make a lazy getter to convert the json response into an object.
+ // Note that this can cause a parse error to be thrown far away from the
+ // actual fetch, so be warned!
+ ChromeUtils.defineLazyGetter(ret, "obj", () => {
+ try {
+ return JSON.parse(ret.data);
+ } catch (ex) {
+ this._log.warn("Got exception parsing response body", ex);
+ // Stringify to avoid possibly printing non-printable characters.
+ this._log.debug(
+ "Parse fail: Response body starts",
+ (ret.data + "").slice(0, 100)
+ );
+ throw ex;
+ }
+ });
+
+ return ret;
+ },
+
+ _logResponse(response, method, data) {
+ const { status, ok: success, url } = response;
+
+ // Log the status of the request.
+ this._log.debug(
+ `${method} ${success ? "success" : "fail"} ${status} ${url}`
+ );
+
+ // Additionally give the full response body when Trace logging.
+ if (this._log.level <= Log.Level.Trace) {
+ this._log.trace(`${method} body`, data);
+ }
+
+ if (!success) {
+ this._log.warn(
+ `${method} request to ${url} failed with status ${status}`
+ );
+ }
+ },
+
+ _processResponseHeaders({ headers, ok: success }) {
+ if (headers.has("x-weave-timestamp")) {
+ Resource.serverTime = parseFloat(headers.get("x-weave-timestamp"));
+ }
+ // This is a server-side safety valve to allow slowing down
+ // clients without hurting performance.
+ if (headers.has("x-weave-backoff")) {
+ let backoff = headers.get("x-weave-backoff");
+ this._log.debug(`Got X-Weave-Backoff: ${backoff}`);
+ Observers.notify("weave:service:backoff:interval", parseInt(backoff, 10));
+ }
+
+ if (success && headers.has("x-weave-quota-remaining")) {
+ Observers.notify(
+ "weave:service:quota:remaining",
+ parseInt(headers.get("x-weave-quota-remaining"), 10)
+ );
+ }
+ },
+
+ get() {
+ return this._doRequest("GET");
+ },
+
+ put(data) {
+ return this._doRequest("PUT", data);
+ },
+
+ post(data) {
+ return this._doRequest("POST", data);
+ },
+
+ delete() {
+ return this._doRequest("DELETE");
+ },
+};
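+
+// For illustration (not used at runtime): callers of get()/put()/post()/
+// delete() receive a plain object built by _processResponse rather than a DOM
+// Response, roughly:
+//
+//   {
+//     data: "...raw response body...",
+//     url: "https://example.com/...",
+//     status: 200,
+//     success: true,
+//     headers: { "x-weave-timestamp": "1234567890.12", ... },
+//     obj: /* lazy getter that JSON.parses `data`, throwing if invalid */
+//   }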
diff --git a/services/sync/modules/service.sys.mjs b/services/sync/modules/service.sys.mjs
new file mode 100644
index 0000000000..97ba0d32cd
--- /dev/null
+++ b/services/sync/modules/service.sys.mjs
@@ -0,0 +1,1643 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const CRYPTO_COLLECTION = "crypto";
+const KEYS_WBO = "keys";
+
+import { AppConstants } from "resource://gre/modules/AppConstants.sys.mjs";
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+
+import {
+ CLIENT_NOT_CONFIGURED,
+ CREDENTIALS_CHANGED,
+ HMAC_EVENT_INTERVAL,
+ LOGIN_FAILED,
+ LOGIN_FAILED_INVALID_PASSPHRASE,
+ LOGIN_FAILED_NETWORK_ERROR,
+ LOGIN_FAILED_NO_PASSPHRASE,
+ LOGIN_FAILED_NO_USERNAME,
+ LOGIN_FAILED_SERVER_ERROR,
+ LOGIN_SUCCEEDED,
+ MASTER_PASSWORD_LOCKED,
+ METARECORD_DOWNLOAD_FAIL,
+ NO_SYNC_NODE_FOUND,
+ PREFS_BRANCH,
+ STATUS_DISABLED,
+ STATUS_OK,
+ STORAGE_VERSION,
+ VERSION_OUT_OF_DATE,
+ WEAVE_VERSION,
+ kFirefoxShuttingDown,
+ kFirstSyncChoiceNotMade,
+ kSyncBackoffNotMet,
+ kSyncMasterPasswordLocked,
+ kSyncNetworkOffline,
+ kSyncNotConfigured,
+ kSyncWeaveDisabled,
+} from "resource://services-sync/constants.sys.mjs";
+
+import { EngineManager } from "resource://services-sync/engines.sys.mjs";
+import { ClientEngine } from "resource://services-sync/engines/clients.sys.mjs";
+import { Weave } from "resource://services-sync/main.sys.mjs";
+import {
+ ErrorHandler,
+ SyncScheduler,
+} from "resource://services-sync/policies.sys.mjs";
+import {
+ CollectionKeyManager,
+ CryptoWrapper,
+ RecordManager,
+ WBORecord,
+} from "resource://services-sync/record.sys.mjs";
+import { Resource } from "resource://services-sync/resource.sys.mjs";
+import { EngineSynchronizer } from "resource://services-sync/stages/enginesync.sys.mjs";
+import { DeclinedEngines } from "resource://services-sync/stages/declined.sys.mjs";
+import { Status } from "resource://services-sync/status.sys.mjs";
+
+ChromeUtils.importESModule("resource://services-sync/telemetry.sys.mjs");
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import { getFxAccountsSingleton } from "resource://gre/modules/FxAccounts.sys.mjs";
+
+const fxAccounts = getFxAccountsSingleton();
+
+function getEngineModules() {
+ let result = {
+ Addons: { module: "addons.sys.mjs", symbol: "AddonsEngine" },
+ Password: { module: "passwords.sys.mjs", symbol: "PasswordEngine" },
+ Prefs: { module: "prefs.sys.mjs", symbol: "PrefsEngine" },
+ };
+ if (AppConstants.MOZ_APP_NAME != "thunderbird") {
+ result.Bookmarks = {
+ module: "bookmarks.sys.mjs",
+ symbol: "BookmarksEngine",
+ };
+ result.Form = { module: "forms.sys.mjs", symbol: "FormEngine" };
+ result.History = { module: "history.sys.mjs", symbol: "HistoryEngine" };
+ result.Tab = { module: "tabs.sys.mjs", symbol: "TabEngine" };
+ }
+ if (Svc.PrefBranch.getBoolPref("engine.addresses.available", false)) {
+ result.Addresses = {
+ module: "resource://autofill/FormAutofillSync.sys.mjs",
+ symbol: "AddressesEngine",
+ };
+ }
+ if (Svc.PrefBranch.getBoolPref("engine.creditcards.available", false)) {
+ result.CreditCards = {
+ module: "resource://autofill/FormAutofillSync.sys.mjs",
+ symbol: "CreditCardsEngine",
+ };
+ }
+ result["Extension-Storage"] = {
+ module: "extension-storage.sys.mjs",
+ controllingPref: "webextensions.storage.sync.kinto",
+ whenTrue: "ExtensionStorageEngineKinto",
+ whenFalse: "ExtensionStorageEngineBridge",
+ };
+ return result;
+}
+
+const lazy = {};
+
+// A unique identifier for this browser session. Used for logging so
+// we can easily see whether 2 logs are in the same browser session or
+// after the browser restarted.
+ChromeUtils.defineLazyGetter(lazy, "browserSessionID", Utils.makeGUID);
+
+function Sync11Service() {
+ this._notify = Utils.notify("weave:service:");
+ Utils.defineLazyIDProperty(this, "syncID", "services.sync.client.syncID");
+}
+Sync11Service.prototype = {
+ _lock: Utils.lock,
+ _locked: false,
+ _loggedIn: false,
+
+ infoURL: null,
+ storageURL: null,
+ metaURL: null,
+  cryptoKeysURL: null,
+ // The cluster URL comes via the identity object, which in the FxA
+  // world is embedded in the token returned from the token server.
+ _clusterURL: null,
+
+ get clusterURL() {
+ return this._clusterURL || "";
+ },
+ set clusterURL(value) {
+ if (value != null && typeof value != "string") {
+ throw new Error("cluster must be a string, got " + typeof value);
+ }
+ this._clusterURL = value;
+ this._updateCachedURLs();
+ },
+
+ get isLoggedIn() {
+ return this._loggedIn;
+ },
+
+ get locked() {
+ return this._locked;
+ },
+ lock: function lock() {
+ if (this._locked) {
+ return false;
+ }
+ this._locked = true;
+ return true;
+ },
+ unlock: function unlock() {
+ this._locked = false;
+ },
+
+ // A specialized variant of Utils.catch.
+ // This provides a more informative error message when we're already syncing:
+ // see Bug 616568.
+ _catch(func) {
+ function lockExceptions(ex) {
+ if (Utils.isLockException(ex)) {
+ // This only happens if we're syncing already.
+ this._log.info("Cannot start sync: already syncing?");
+ }
+ }
+
+ return Utils.catch.call(this, func, lockExceptions);
+ },
+
+ get userBaseURL() {
+ // The user URL is the cluster URL.
+ return this.clusterURL;
+ },
+
+ _updateCachedURLs: function _updateCachedURLs() {
+ // Nothing to cache yet if we don't have the building blocks
+ if (!this.clusterURL) {
+ // Also reset all other URLs used by Sync to ensure we aren't accidentally
+ // using one cached earlier - if there's no cluster URL any cached ones
+ // are invalid.
+ this.infoURL = undefined;
+ this.storageURL = undefined;
+ this.metaURL = undefined;
+ this.cryptoKeysURL = undefined;
+ return;
+ }
+
+ this._log.debug(
+ "Caching URLs under storage user base: " + this.userBaseURL
+ );
+
+ // Generate and cache various URLs under the storage API for this user
+ this.infoURL = this.userBaseURL + "info/collections";
+ this.storageURL = this.userBaseURL + "storage/";
+ this.metaURL = this.storageURL + "meta/global";
+ this.cryptoKeysURL = this.storageURL + CRYPTO_COLLECTION + "/" + KEYS_WBO;
+ },
+
+ _checkCrypto: function _checkCrypto() {
+ let ok = false;
+
+ try {
+ let iv = Weave.Crypto.generateRandomIV();
+ if (iv.length == 24) {
+ ok = true;
+ }
+ } catch (e) {
+ this._log.debug("Crypto check failed: " + e);
+ }
+
+ return ok;
+ },
+
+ /**
+ * Here is a disgusting yet reasonable way of handling HMAC errors deep in
+ * the guts of Sync. The astute reader will note that this is a hacky way of
+ * implementing something like continuable conditions.
+ *
+ * A handler function is glued to each engine. If the engine discovers an
+ * HMAC failure, we fetch keys from the server and update our keys, just as
+ * we would on startup.
+ *
+ * If our key collection changed, we signal to the engine (via our return
+ * value) that it should retry decryption.
+ *
+ * If our key collection did not change, it means that we already had the
+ * correct keys... and thus a different client has the wrong ones. Reupload
+ * the bundle that we fetched, which will bump the modified time on the
+ * server and (we hope) prompt a broken client to fix itself.
+ *
+ * We keep track of the time at which we last applied this reasoning, because
+ * thrashing doesn't solve anything. We keep a reasonable interval between
+ * these remedial actions.
+ */
+ lastHMACEvent: 0,
+
+ /*
+ * Returns whether to try again.
+ */
+ async handleHMACEvent() {
+ let now = Date.now();
+
+ // Leave a sizable delay between HMAC recovery attempts. This gives us
+ // time for another client to fix themselves if we touch the record.
+ if (now - this.lastHMACEvent < HMAC_EVENT_INTERVAL) {
+ return false;
+ }
+
+ this._log.info(
+ "Bad HMAC event detected. Attempting recovery " +
+ "or signaling to other clients."
+ );
+
+ // Set the last handled time so that we don't act again.
+ this.lastHMACEvent = now;
+
+ // Fetch keys.
+ let cryptoKeys = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
+ try {
+ let cryptoResp = (
+ await cryptoKeys.fetch(this.resource(this.cryptoKeysURL))
+ ).response;
+
+ // Save out the ciphertext for when we reupload. If there's a bug in
+ // CollectionKeyManager, this will prevent us from uploading junk.
+ let cipherText = cryptoKeys.ciphertext;
+
+ if (!cryptoResp.success) {
+ this._log.warn("Failed to download keys.");
+ return false;
+ }
+
+ let keysChanged = await this.handleFetchedKeys(
+ this.identity.syncKeyBundle,
+ cryptoKeys,
+ true
+ );
+ if (keysChanged) {
+ // Did they change? If so, carry on.
+ this._log.info("Suggesting retry.");
+ return true; // Try again.
+ }
+
+ // If not, reupload them and continue the current sync.
+ cryptoKeys.ciphertext = cipherText;
+ cryptoKeys.cleartext = null;
+
+ let uploadResp = await this._uploadCryptoKeys(
+ cryptoKeys,
+ cryptoResp.obj.modified
+ );
+ if (uploadResp.success) {
+ this._log.info("Successfully re-uploaded keys. Continuing sync.");
+ } else {
+ this._log.warn(
+ "Got error response re-uploading keys. " +
+ "Continuing sync; let's try again later."
+ );
+ }
+
+ return false; // Don't try again: same keys.
+ } catch (ex) {
+ this._log.warn(
+ "Got exception fetching and handling crypto keys. " +
+ "Will try again later.",
+ ex
+ );
+ return false;
+ }
+ },
+
+ async handleFetchedKeys(syncKey, cryptoKeys, skipReset) {
+ // Don't want to wipe if we're just starting up!
+ let wasBlank = this.collectionKeys.isClear;
+ let keysChanged = await this.collectionKeys.updateContents(
+ syncKey,
+ cryptoKeys
+ );
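+ // keysChanged is falsy when nothing changed; when truthy, a non-empty
+ // .length means specific collection keys changed, and anything else means
+ // the default key changed (see the checks below).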
+
+ if (keysChanged && !wasBlank) {
+ this._log.debug("Keys changed: " + JSON.stringify(keysChanged));
+
+ if (!skipReset) {
+ this._log.info("Resetting client to reflect key change.");
+
+ if (keysChanged.length) {
+ // Collection keys only. Reset individual engines.
+ await this.resetClient(keysChanged);
+ } else {
+ // Default key changed: wipe it all.
+ await this.resetClient();
+ }
+
+ this._log.info("Downloaded new keys, client reset. Proceeding.");
+ }
+ return true;
+ }
+ return false;
+ },
+
+ /**
+ * Initialize the rest of Weave at startup; readiness is signaled on a
+ * later tick so observers can register first.
+ */
+ async onStartup() {
+ this.status = Status;
+ this.identity = Status._authManager;
+ this.collectionKeys = new CollectionKeyManager();
+
+ this.scheduler = new SyncScheduler(this);
+ this.errorHandler = new ErrorHandler(this);
+
+ this._log = Log.repository.getLogger("Sync.Service");
+ this._log.manageLevelFromPref("services.sync.log.logger.service.main");
+
+ this._log.info("Loading Weave " + WEAVE_VERSION);
+
+ this.recordManager = new RecordManager(this);
+
+ this.enabled = true;
+
+ await this._registerEngines();
+
+ let ua = Cc["@mozilla.org/network/protocol;1?name=http"].getService(
+ Ci.nsIHttpProtocolHandler
+ ).userAgent;
+ this._log.info(ua);
+
+ if (!this._checkCrypto()) {
+ this.enabled = false;
+ this._log.info(
+ "Could not load the Weave crypto component. Disabling " +
+ "Weave, since it will not work correctly."
+ );
+ }
+
+ Svc.Obs.add("weave:service:setup-complete", this);
+ Svc.Obs.add("sync:collection_changed", this); // Pulled from FxAccountsCommon
+ Svc.Obs.add("fxaccounts:device_disconnected", this);
+ Services.prefs.addObserver(PREFS_BRANCH + "engine.", this);
+
+ if (!this.enabled) {
+ this._log.info("Firefox Sync disabled.");
+ }
+
+ this._updateCachedURLs();
+
+ let status = this._checkSetup();
+ if (status != STATUS_DISABLED && status != CLIENT_NOT_CONFIGURED) {
+ this._startTracking();
+ }
+
+ // Send an event now that Weave service is ready. We don't do this
+ // synchronously so that observers can import this module before
+ // registering an observer.
+ CommonUtils.nextTick(() => {
+ this.status.ready = true;
+
+ // UI code uses the flag on the XPCOM service so it doesn't have
+ // to load a bunch of modules.
+ let xps = Cc["@mozilla.org/weave/service;1"].getService(
+ Ci.nsISupports
+ ).wrappedJSObject;
+ xps.ready = true;
+
+ Svc.Obs.notify("weave:service:ready");
+ });
+ },
+
+ _checkSetup: function _checkSetup() {
+ if (!this.enabled) {
+ return (this.status.service = STATUS_DISABLED);
+ }
+ return this.status.checkSetup();
+ },
+
+ /**
+ * Register the built-in engines for certain applications
+ */
+ async _registerEngines() {
+ this.engineManager = new EngineManager(this);
+
+ let engineModules = getEngineModules();
+
+ let engines = [];
+ // We allow a pref, which has no default value, to limit the engines
+ // which are registered. We expect only tests will use this.
+ if (
+ Svc.PrefBranch.getPrefType("registerEngines") !=
+ Ci.nsIPrefBranch.PREF_INVALID
+ ) {
+ engines = Svc.PrefBranch.getStringPref("registerEngines").split(",");
+ this._log.info("Registering custom set of engines", engines);
+ } else {
+ // default is all engines.
+ engines = Object.keys(engineModules);
+ }
+
+ let declined = [];
+ let pref = Svc.PrefBranch.getStringPref("declinedEngines", null);
+ if (pref) {
+ declined = pref.split(",");
+ }
+
+ let clientsEngine = new ClientEngine(this);
+ // Ideally clientsEngine would not be exposed until initialized
+ // (or would be a promise that calls initialize() before returning the engine).
+ await clientsEngine.initialize();
+ this.clientsEngine = clientsEngine;
+
+ for (let name of engines) {
+ if (!(name in engineModules)) {
+ this._log.info("Do not know about engine: " + name);
+ continue;
+ }
+ let modInfo = engineModules[name];
+ if (!modInfo.module.includes(":")) {
+ modInfo.module = "resource://services-sync/engines/" + modInfo.module;
+ }
+ try {
+ let ns = ChromeUtils.importESModule(modInfo.module);
+ if (modInfo.symbol) {
+ let symbol = modInfo.symbol;
+ if (!(symbol in ns)) {
+ this._log.warn(
+ "Could not find exported engine instance: " + symbol
+ );
+ continue;
+ }
+ await this.engineManager.register(ns[symbol]);
+ } else {
+ let { whenTrue, whenFalse, controllingPref } = modInfo;
+ if (!(whenTrue in ns) || !(whenFalse in ns)) {
+ this._log.warn("Could not find all exported engine instances", {
+ whenTrue,
+ whenFalse,
+ });
+ continue;
+ }
+ await this.engineManager.registerAlternatives(
+ name.toLowerCase(),
+ controllingPref,
+ ns[whenTrue],
+ ns[whenFalse]
+ );
+ }
+ } catch (ex) {
+ this._log.warn("Could not register engine " + name, ex);
+ }
+ }
+
+ this.engineManager.setDeclined(declined);
+ },
+
+ /**
+ * This method updates the local engines state from an existing meta/global
+ * when Sync is disabled.
+ * Running this code if sync is enabled would end up in very weird results
+ * (but we're nice and we check before doing anything!).
+ */
+ async updateLocalEnginesState() {
+ await this.promiseInitialized;
+
+ // Sanity check, this method is not meant to be run if Sync is enabled!
+ if (Svc.PrefBranch.getStringPref("username", "")) {
+ throw new Error("Sync is enabled!");
+ }
+
+ // For historical reasons the behaviour of setCluster() is bizarre,
+ // so just check what we care about - the meta URL.
+ if (!this.metaURL) {
+ await this.identity.setCluster();
+ if (!this.metaURL) {
+ this._log.warn("Could not find a cluster.");
+ return;
+ }
+ }
+ // Clear the cache so we always fetch the latest meta/global.
+ this.recordManager.clearCache();
+ let meta = await this.recordManager.get(this.metaURL);
+ if (!meta) {
+ this._log.info("Meta record is null, aborting engine state update.");
+ return;
+ }
+ const declinedEngines = meta.payload.declined;
+ const allEngines = this.engineManager.getAll().map(e => e.name);
+ // We don't want our observer of the enabled prefs to treat the change as
+ // a user-change, otherwise we will do the wrong thing with declined etc.
+ this._ignorePrefObserver = true;
+ try {
+ for (const engine of allEngines) {
+ Svc.PrefBranch.setBoolPref(
+ `engine.${engine}`,
+ !declinedEngines.includes(engine)
+ );
+ }
+ } finally {
+ this._ignorePrefObserver = false;
+ }
+ },
+
+ QueryInterface: ChromeUtils.generateQI([
+ "nsIObserver",
+ "nsISupportsWeakReference",
+ ]),
+
+ observe(subject, topic, data) {
+ switch (topic) {
+ // Ideally this observer should be in the SyncScheduler, but that would require
+ // some work for the scheduler to know about the sync-specific engines. We should move it there once that's done.
+ case "sync:collection_changed":
+ // We check if we're running TPS here to avoid TPS failing because it
+ // couldn't get the sync lock, due to us currently syncing the
+ // clients engine.
+ if (
+ data.includes("clients") &&
+ !Svc.PrefBranch.getBoolPref("testing.tps", false)
+ ) {
+ // Sync in the background (it's fine not to wait on the returned promise
+ // because sync() has a lock).
+ // [] = clients collection only
+ this.sync({ why: "collection_changed", engines: [] }).catch(e => {
+ this._log.error(e);
+ });
+ }
+ break;
+ case "fxaccounts:device_disconnected":
+ data = JSON.parse(data);
+ if (!data.isLocalDevice) {
+ // Refresh the known stale clients list in the background.
+ this.clientsEngine.updateKnownStaleClients().catch(e => {
+ this._log.error(e);
+ });
+ }
+ break;
+ case "weave:service:setup-complete":
+ let status = this._checkSetup();
+ if (status != STATUS_DISABLED && status != CLIENT_NOT_CONFIGURED) {
+ this._startTracking();
+ }
+ break;
+ case "nsPref:changed":
+ if (this._ignorePrefObserver) {
+ return;
+ }
+ const engine = data.slice((PREFS_BRANCH + "engine.").length);
+ if (engine.includes(".")) {
+ // A sub-preference of the engine was changed. For example
+ // `services.sync.engine.bookmarks.validation.percentageChance`.
+ return;
+ }
+ this._handleEngineStatusChanged(engine);
+ break;
+ }
+ },
+
+ _handleEngineStatusChanged(engine) {
+ this._log.trace("Status for " + engine + " engine changed.");
+ if (Svc.PrefBranch.getBoolPref("engineStatusChanged." + engine, false)) {
+ // The enabled status is being changed back to what it was before.
+ Svc.PrefBranch.clearUserPref("engineStatusChanged." + engine);
+ } else {
+ // Remember that the engine status changed locally until the next sync.
+ Svc.PrefBranch.setBoolPref("engineStatusChanged." + engine, true);
+ }
+ },
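+
+ // Note the toggle semantics above: flipping an engine off and back on
+ // between syncs clears the "engineStatusChanged" flag again, since the
+ // state is back to what the last sync saw.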
+
+ _startTracking() {
+ const engines = [this.clientsEngine, ...this.engineManager.getAll()];
+ for (let engine of engines) {
+ try {
+ engine.startTracking();
+ } catch (e) {
+ this._log.error(`Could not start ${engine.name} engine tracker`, e);
+ }
+ }
+ // This is for TPS. We should try to do better.
+ Svc.Obs.notify("weave:service:tracking-started");
+ },
+
+ async _stopTracking() {
+ const engines = [this.clientsEngine, ...this.engineManager.getAll()];
+ for (let engine of engines) {
+ try {
+ await engine.stopTracking();
+ } catch (e) {
+ this._log.error(`Could not stop ${engine.name} engine tracker`, e);
+ }
+ }
+ Svc.Obs.notify("weave:service:tracking-stopped");
+ },
+
+ /**
+ * Obtain a Resource instance with authentication credentials.
+ */
+ resource: function resource(url) {
+ let res = new Resource(url);
+ res.authenticator = this.identity.getResourceAuthenticator();
+
+ return res;
+ },
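+
+ // Typical use elsewhere in this file: `await this.resource(this.infoURL).get()`
+ // yields a response exposing .status, .success, .obj and .headers.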
+
+ /**
+ * Perform the info fetch as part of a login or key fetch, or
+ * inside engine sync.
+ */
+ async _fetchInfo(url) {
+ let infoURL = url || this.infoURL;
+
+ this._log.trace("In _fetchInfo: " + infoURL);
+ let info;
+ try {
+ info = await this.resource(infoURL).get();
+ } catch (ex) {
+ this.errorHandler.checkServerError(ex);
+ throw ex;
+ }
+
+ // Always check for errors.
+ this.errorHandler.checkServerError(info);
+ if (!info.success) {
+ this._log.error("Aborting sync: failed to get collections.");
+ throw info;
+ }
+ return info;
+ },
+
+ async verifyAndFetchSymmetricKeys(infoResponse) {
+ this._log.debug(
+ "Fetching and verifying -- or generating -- symmetric keys."
+ );
+
+ let syncKeyBundle = this.identity.syncKeyBundle;
+ if (!syncKeyBundle) {
+ this.status.login = LOGIN_FAILED_NO_PASSPHRASE;
+ this.status.sync = CREDENTIALS_CHANGED;
+ return false;
+ }
+
+ try {
+ if (!infoResponse) {
+ infoResponse = await this._fetchInfo(); // Will throw an exception on failure.
+ }
+
+ // This only applies when the server is already at version 4.
+ if (infoResponse.status != 200) {
+ this._log.warn(
+ "info/collections returned non-200 response. Failing key fetch."
+ );
+ this.status.login = LOGIN_FAILED_SERVER_ERROR;
+ this.errorHandler.checkServerError(infoResponse);
+ return false;
+ }
+
+ let infoCollections = infoResponse.obj;
+
+ this._log.info(
+ "Testing info/collections: " + JSON.stringify(infoCollections)
+ );
+
+ if (this.collectionKeys.updateNeeded(infoCollections)) {
+ this._log.info("collectionKeys reports that a key update is needed.");
+
+ // Don't always set to CREDENTIALS_CHANGED -- we will probably take care of this.
+
+ // Fetch storage/crypto/keys.
+ let cryptoKeys;
+
+ if (infoCollections && CRYPTO_COLLECTION in infoCollections) {
+ try {
+ cryptoKeys = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
+ let cryptoResp = (
+ await cryptoKeys.fetch(this.resource(this.cryptoKeysURL))
+ ).response;
+
+ if (cryptoResp.success) {
+ await this.handleFetchedKeys(syncKeyBundle, cryptoKeys);
+ return true;
+ } else if (cryptoResp.status == 404) {
+ // On failure, ask to generate new keys and upload them.
+ // Fall through to the behavior below.
+ this._log.warn(
+ "Got 404 for crypto/keys, but 'crypto' in info/collections. Regenerating."
+ );
+ cryptoKeys = null;
+ } else {
+ // Some other problem.
+ this.status.login = LOGIN_FAILED_SERVER_ERROR;
+ this.errorHandler.checkServerError(cryptoResp);
+ this._log.warn(
+ "Got status " + cryptoResp.status + " fetching crypto keys."
+ );
+ return false;
+ }
+ } catch (ex) {
+ this._log.warn("Got exception fetching cryptoKeys.", ex);
+ // TODO: Um, what exceptions might we get here? Should we re-throw any?
+
+ // One kind of exception: HMAC failure.
+ if (Utils.isHMACMismatch(ex)) {
+ this.status.login = LOGIN_FAILED_INVALID_PASSPHRASE;
+ this.status.sync = CREDENTIALS_CHANGED;
+ } else {
+ // In the absence of further disambiguation or more precise
+ // failure constants, just report failure.
+ this.status.login = LOGIN_FAILED;
+ }
+ return false;
+ }
+ } else {
+ this._log.info(
+ "... 'crypto' is not a reported collection. Generating new keys."
+ );
+ }
+
+ if (!cryptoKeys) {
+ this._log.info("No keys! Generating new ones.");
+
+ // Better make some and upload them, and wipe the server to ensure
+ // consistency. This is all achieved via _freshStart.
+ // If _freshStart fails to clear the server or upload keys, it will
+ // throw.
+ await this._freshStart();
+ return true;
+ }
+
+ // Last-ditch case.
+ return false;
+ }
+ // No update needed: we're good!
+ return true;
+ } catch (ex) {
+ // This means no keys are present, or there's a network error.
+ this._log.debug("Failed to fetch and verify keys", ex);
+ this.errorHandler.checkServerError(ex);
+ return false;
+ }
+ },
+
+ getMaxRecordPayloadSize() {
+ let config = this.serverConfiguration;
+ if (!config || !config.max_record_payload_bytes) {
+ this._log.warn(
+ "No config or incomplete config in getMaxRecordPayloadSize." +
+ " Are we running tests?"
+ );
+ return 256 * 1024;
+ }
+ let payloadMax = config.max_record_payload_bytes;
+ if (config.max_post_bytes && payloadMax <= config.max_post_bytes) {
+ return config.max_post_bytes - 4096;
+ }
+ return payloadMax;
+ },
+
+ getMemcacheMaxRecordPayloadSize() {
+ // Collections stored in memcached ("tabs", "clients" or "meta") have a
+ // different max size than ones stored in the normal storage server db.
+ // In practice, the real limit here is 1M (bug 1300451 comment 40), but
+ // there's overhead involved that is hard to calculate on the client, so we
+ // use 512k to be safe (at the recommendation of the server team). Note
+ // that if the server reports a lower limit (via info/configuration), we
+ // respect that limit instead. See also bug 1403052.
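+ // For example (hypothetical values): a server reporting a 2MB record
+ // payload limit yields min(512KB, 2MB) = 512KB here, while one reporting
+ // 100KB yields 100KB.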
+ return Math.min(512 * 1024, this.getMaxRecordPayloadSize());
+ },
+
+ async verifyLogin(allow40XRecovery = true) {
+ // Attaching auth credentials to a request requires access to
+ // passwords, which means that Resource.get can throw MP-related
+ // exceptions!
+ // So we ask the identity to verify the login state after unlocking the
+ // master password (ie, this call is expected to prompt for MP unlock
+ // if necessary) while we still have control.
+ this.status.login = await this.identity.unlockAndVerifyAuthState();
+ this._log.debug(
+ "Fetching unlocked auth state returned " + this.status.login
+ );
+ if (this.status.login != STATUS_OK) {
+ return false;
+ }
+
+ try {
+ // Make sure we have a cluster to verify against.
+ // This is a little weird: if we don't get a node, we pretend
+ // to succeed, since that probably means we just don't have storage.
+ if (this.clusterURL == "" && !(await this.identity.setCluster())) {
+ this.status.sync = NO_SYNC_NODE_FOUND;
+ return true;
+ }
+
+ // Fetch collection info on every startup.
+ let test = await this.resource(this.infoURL).get();
+
+ switch (test.status) {
+ case 200:
+ // The user is authenticated.
+
+ // We have no way of verifying the passphrase right now,
+ // so wait until remoteSetup to do so.
+ // Just make the most trivial checks.
+ if (!this.identity.syncKeyBundle) {
+ this._log.warn("No passphrase in verifyLogin.");
+ this.status.login = LOGIN_FAILED_NO_PASSPHRASE;
+ return false;
+ }
+
+ // Go ahead and do remote setup, so that we can determine
+ // conclusively that our passphrase is correct.
+ if (await this._remoteSetup(test)) {
+ // Username/password verified.
+ this.status.login = LOGIN_SUCCEEDED;
+ return true;
+ }
+
+ this._log.warn("Remote setup failed.");
+ // Remote setup must have failed.
+ return false;
+
+ case 401:
+ this._log.warn("401: login failed.");
+ // Fall through to the 404 case.
+
+ case 404:
+ // Check that we're verifying with the correct cluster
+ if (allow40XRecovery && (await this.identity.setCluster())) {
+ return await this.verifyLogin(false);
+ }
+
+ // We must have the right cluster, but the server doesn't expect us.
+ // For FxA this almost certainly means "transient error fetching token".
+ this.status.login = LOGIN_FAILED_NETWORK_ERROR;
+ return false;
+
+ default:
+ // Server didn't respond with something that we expected
+ this.status.login = LOGIN_FAILED_SERVER_ERROR;
+ this.errorHandler.checkServerError(test);
+ return false;
+ }
+ } catch (ex) {
+ // Must have failed on some network issue
+ this._log.debug("verifyLogin failed", ex);
+ this.status.login = LOGIN_FAILED_NETWORK_ERROR;
+ this.errorHandler.checkServerError(ex);
+ return false;
+ }
+ },
+
+ async generateNewSymmetricKeys() {
+ this._log.info("Generating new keys WBO...");
+ let wbo = await this.collectionKeys.generateNewKeysWBO();
+ this._log.info("Encrypting new key bundle.");
+ await wbo.encrypt(this.identity.syncKeyBundle);
+
+ let uploadRes = await this._uploadCryptoKeys(wbo, 0);
+ if (uploadRes.status != 200) {
+ this._log.warn(
+ "Got status " +
+ uploadRes.status +
+ " uploading new keys. What to do? Throw!"
+ );
+ this.errorHandler.checkServerError(uploadRes);
+ throw new Error("Unable to upload symmetric keys.");
+ }
+ this._log.info("Got status " + uploadRes.status + " uploading keys.");
+ let serverModified = uploadRes.obj; // Modified timestamp according to server.
+ this._log.debug("Server reports crypto modified: " + serverModified);
+
+ // Now verify that info/collections shows them!
+ this._log.debug("Verifying server collection records.");
+ let info = await this._fetchInfo();
+ this._log.debug("info/collections is: " + info.data);
+
+ if (info.status != 200) {
+ this._log.warn("Non-200 info/collections response. Aborting.");
+ throw new Error("Unable to upload symmetric keys.");
+ }
+
+ info = info.obj;
+ if (!(CRYPTO_COLLECTION in info)) {
+ this._log.error(
+ "Consistency failure: info/collections excludes " +
+ "crypto after successful upload."
+ );
+ throw new Error("Symmetric key upload failed.");
+ }
+
+ // Can't check against local modified: clock drift.
+ if (info[CRYPTO_COLLECTION] < serverModified) {
+ this._log.error(
+ "Consistency failure: info/collections crypto entry " +
+ "is stale after successful upload."
+ );
+ throw new Error("Symmetric key upload failed.");
+ }
+
+ // Doesn't matter if the timestamp is ahead.
+
+ // Download and install them.
+ let cryptoKeys = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
+ let cryptoResp = (await cryptoKeys.fetch(this.resource(this.cryptoKeysURL)))
+ .response;
+ if (cryptoResp.status != 200) {
+ this._log.warn("Failed to download keys.");
+ throw new Error("Symmetric key download failed.");
+ }
+ let keysChanged = await this.handleFetchedKeys(
+ this.identity.syncKeyBundle,
+ cryptoKeys,
+ true
+ );
+ if (keysChanged) {
+ this._log.info("Downloaded keys differed, as expected.");
+ }
+ },
+
+ // Configures/enables/turns on sync. There must be an FxA user signed in.
+ async configure() {
+ // We don't, and must not, throw if sync is already configured, because we
+ // might end up being called as part of a "reconnect" flow. We also want to
+ // avoid checking the FxA user is the same as the pref because the email
+ // address for the FxA account can change - we'd need to use the uid.
+ let user = await fxAccounts.getSignedInUser();
+ if (!user) {
+ throw new Error("No FxA user is signed in");
+ }
+ this._log.info("Configuring sync with current FxA user");
+ Svc.PrefBranch.setStringPref("username", user.email);
+ Svc.Obs.notify("weave:connected");
+ },
+
+ // resets/turns-off sync.
+ async startOver() {
+ this._log.trace("Invoking Service.startOver.");
+ await this._stopTracking();
+ this.status.resetSync();
+
+ // Deletion doesn't make sense if we aren't set up yet!
+ if (this.clusterURL != "") {
+ // Clear client-specific data from the server, including disabled engines.
+ const engines = [this.clientsEngine, ...this.engineManager.getAll()];
+ for (let engine of engines) {
+ try {
+ await engine.removeClientData();
+ } catch (ex) {
+ this._log.warn(`Deleting client data for ${engine.name} failed`, ex);
+ }
+ }
+ this._log.debug("Finished deleting client data.");
+ } else {
+ this._log.debug("Skipping client data removal: no cluster URL.");
+ }
+
+ this.identity.resetCredentials();
+ this.status.login = LOGIN_FAILED_NO_USERNAME;
+ this.logout();
+ Svc.Obs.notify("weave:service:start-over");
+
+ // Reset all engines and clear keys.
+ await this.resetClient();
+ this.collectionKeys.clear();
+ this.status.resetBackoff();
+
+ // Reset Weave prefs.
+ this._ignorePrefObserver = true;
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ this._ignorePrefObserver = false;
+ this.clusterURL = null;
+
+ Svc.PrefBranch.setStringPref("lastversion", WEAVE_VERSION);
+
+ try {
+ this.identity.finalize();
+ this.status.__authManager = null;
+ this.identity = Status._authManager;
+ Svc.Obs.notify("weave:service:start-over:finish");
+ } catch (err) {
+ this._log.error(
+ "startOver failed to re-initialize the identity manager",
+ err
+ );
+ // Still send the observer notification so the current state is
+ // reflected in the UI.
+ Svc.Obs.notify("weave:service:start-over:finish");
+ }
+ },
+
+ async login() {
+ async function onNotify() {
+ this._loggedIn = false;
+ if (this.scheduler.offline) {
+ this.status.login = LOGIN_FAILED_NETWORK_ERROR;
+ throw new Error("Application is offline, login should not be called");
+ }
+
+ this._log.info("User logged in successfully - verifying login.");
+ if (!(await this.verifyLogin())) {
+ // verifyLogin sets the failure states here.
+ throw new Error(`Login failed: ${this.status.login}`);
+ }
+
+ this._updateCachedURLs();
+
+ this._loggedIn = true;
+
+ return true;
+ }
+
+ let notifier = this._notify("login", "", onNotify.bind(this));
+ return this._catch(this._lock("service.js: login", notifier))();
+ },
+
+ logout: function logout() {
+ // If we failed during login, we aren't going to have this._loggedIn set,
+ // but we still want to ask the identity to logout, so it doesn't try and
+ // reuse any old credentials next time we sync.
+ this._log.info("Logging out");
+ this.identity.logout();
+ this._loggedIn = false;
+
+ Svc.Obs.notify("weave:service:logout:finish");
+ },
+
+ // Note: returns false if we failed for a reason other than the server not yet
+ // supporting the API.
+ async _fetchServerConfiguration() {
+ // This is similar to _fetchInfo, but with different error handling.
+
+ let infoURL = this.userBaseURL + "info/configuration";
+ this._log.debug("Fetching server configuration", infoURL);
+ let configResponse;
+ try {
+ configResponse = await this.resource(infoURL).get();
+ } catch (ex) {
+ // This is probably a network or similar error.
+ this._log.warn("Failed to fetch info/configuration", ex);
+ this.errorHandler.checkServerError(ex);
+ return false;
+ }
+
+ if (configResponse.status == 404) {
+ // This server doesn't support the URL yet - that's OK.
+ this._log.debug(
+ "info/configuration returned 404 - using default upload semantics"
+ );
+ } else if (configResponse.status != 200) {
+ this._log.warn(
+ `info/configuration returned ${configResponse.status} - using default configuration`
+ );
+ this.errorHandler.checkServerError(configResponse);
+ return false;
+ } else {
+ this.serverConfiguration = configResponse.obj;
+ }
+ this._log.trace(
+ "info/configuration for this server",
+ this.serverConfiguration
+ );
+ return true;
+ },
+
+ // Stuff we need to do after login, before we can really do
+ // anything (e.g. key setup).
+ async _remoteSetup(infoResponse, fetchConfig = true) {
+ if (fetchConfig && !(await this._fetchServerConfiguration())) {
+ return false;
+ }
+
+ this._log.debug("Fetching global metadata record");
+ let meta = await this.recordManager.get(this.metaURL);
+
+ // Checking modified time of the meta record.
+ if (
+ infoResponse &&
+ infoResponse.obj.meta != this.metaModified &&
+ (!meta || !meta.isNew)
+ ) {
+ // Delete the cached meta record...
+ this._log.debug(
+ "Clearing cached meta record. metaModified is " +
+ JSON.stringify(this.metaModified) +
+ ", setting to " +
+ JSON.stringify(infoResponse.obj.meta)
+ );
+
+ this.recordManager.del(this.metaURL);
+
+ // ... fetch the current record from the server, and COPY THE FLAGS.
+ let newMeta = await this.recordManager.get(this.metaURL);
+
+ // If we got a 401, we do not want to create a new meta/global - we
+ // should be able to get the existing meta after we get a new node.
+ if (this.recordManager.response.status == 401) {
+ this._log.debug(
+ "Fetching meta/global record on the server returned 401."
+ );
+ this.errorHandler.checkServerError(this.recordManager.response);
+ return false;
+ }
+
+ if (this.recordManager.response.status == 404) {
+ this._log.debug("No meta/global record on the server. Creating one.");
+ try {
+ await this._uploadNewMetaGlobal();
+ } catch (uploadRes) {
+ this._log.warn(
+ "Unable to upload new meta/global. Failing remote setup."
+ );
+ this.errorHandler.checkServerError(uploadRes);
+ return false;
+ }
+ } else if (!newMeta) {
+ this._log.warn("Unable to get meta/global. Failing remote setup.");
+ this.errorHandler.checkServerError(this.recordManager.response);
+ return false;
+ } else {
+ // If newMeta, then it stands to reason that meta != null.
+ newMeta.isNew = meta.isNew;
+ newMeta.changed = meta.changed;
+ }
+
+ // Switch in the new meta object and record the new time.
+ meta = newMeta;
+ this.metaModified = infoResponse.obj.meta;
+ }
+
+ let remoteVersion =
+ meta && meta.payload.storageVersion ? meta.payload.storageVersion : "";
+
+ this._log.debug(
+ [
+ "Weave Version:",
+ WEAVE_VERSION,
+ "Local Storage:",
+ STORAGE_VERSION,
+ "Remote Storage:",
+ remoteVersion,
+ ].join(" ")
+ );
+
+ // Check for cases that require a fresh start. When comparing remoteVersion,
+ // we need to convert it to a number as older clients used it as a string.
+ if (
+ !meta ||
+ !meta.payload.storageVersion ||
+ !meta.payload.syncID ||
+ STORAGE_VERSION > parseFloat(remoteVersion)
+ ) {
+ this._log.info(
+ "One of: no meta, no meta storageVersion, or no meta syncID. Fresh start needed."
+ );
+
+ // abort the server wipe if the GET status was anything other than 404 or 200
+ let status = this.recordManager.response.status;
+ if (status != 200 && status != 404) {
+ this.status.sync = METARECORD_DOWNLOAD_FAIL;
+ this.errorHandler.checkServerError(this.recordManager.response);
+ this._log.warn(
+ "Unknown error while downloading metadata record. Aborting sync."
+ );
+ return false;
+ }
+
+ if (!meta) {
+ this._log.info("No metadata record, server wipe needed");
+ }
+ if (meta && !meta.payload.syncID) {
+ this._log.warn("No sync id, server wipe needed");
+ }
+
+ this._log.info("Wiping server data");
+ await this._freshStart();
+
+ if (status == 404) {
+ this._log.info(
+ "Metadata record not found, server was wiped to ensure " +
+ "consistency."
+ );
+ } else {
+ // 200
+ this._log.info("Wiped server; incompatible metadata: " + remoteVersion);
+ }
+ return true;
+ } else if (remoteVersion > STORAGE_VERSION) {
+ this.status.sync = VERSION_OUT_OF_DATE;
+ this._log.warn("Upgrade required to access newer storage version.");
+ return false;
+ } else if (meta.payload.syncID != this.syncID) {
+ this._log.info(
+ "Sync IDs differ. Local is " +
+ this.syncID +
+ ", remote is " +
+ meta.payload.syncID
+ );
+ await this.resetClient();
+ this.collectionKeys.clear();
+ this.syncID = meta.payload.syncID;
+ this._log.debug("Clear cached values and take syncId: " + this.syncID);
+
+ if (!(await this.verifyAndFetchSymmetricKeys(infoResponse))) {
+ this._log.warn("Failed to fetch symmetric keys. Failing remote setup.");
+ return false;
+ }
+
+ // bug 545725 - re-verify creds and fail sanely
+ if (!(await this.verifyLogin())) {
+ this.status.sync = CREDENTIALS_CHANGED;
+ this._log.info(
+ "Credentials have changed, aborting sync and forcing re-login."
+ );
+ return false;
+ }
+
+ return true;
+ }
+ if (!(await this.verifyAndFetchSymmetricKeys(infoResponse))) {
+ this._log.warn("Failed to fetch symmetric keys. Failing remote setup.");
+ return false;
+ }
+
+ return true;
+ },
+
+ /**
+ * Return whether we should attempt login at the start of a sync.
+ *
+ * Note that this function has strong ties to _checkSync: callers
+ * of this function should typically use _checkSync to verify that
+ * any necessary login took place.
+ */
+ _shouldLogin: function _shouldLogin() {
+ return (
+ this.enabled &&
+ !this.scheduler.offline &&
+ !this.isLoggedIn &&
+ Async.isAppReady()
+ );
+ },
+
+ /**
+ * Determine if a sync should run.
+ *
+ * @param ignore [optional]
+ * array of reasons to ignore when checking
+ *
+ * @return Reason for not syncing; not-truthy if sync should run
+ */
+ _checkSync: function _checkSync(ignore) {
+ let reason = "";
+ // Ideally we'd call _checkSetup() here but that has too many side-effects.
+ if (Status.service == CLIENT_NOT_CONFIGURED) {
+ reason = kSyncNotConfigured;
+ } else if (Status.service == STATUS_DISABLED || !this.enabled) {
+ reason = kSyncWeaveDisabled;
+ } else if (this.scheduler.offline) {
+ reason = kSyncNetworkOffline;
+ } else if (this.status.minimumNextSync > Date.now()) {
+ reason = kSyncBackoffNotMet;
+ } else if (
+ this.status.login == MASTER_PASSWORD_LOCKED &&
+ Utils.mpLocked()
+ ) {
+ reason = kSyncMasterPasswordLocked;
+ } else if (Svc.PrefBranch.getStringPref("firstSync", null) == "notReady") {
+ reason = kFirstSyncChoiceNotMade;
+ } else if (!Async.isAppReady()) {
+ reason = kFirefoxShuttingDown;
+ }
+
+ if (ignore && ignore.includes(reason)) {
+ return "";
+ }
+
+ return reason;
+ },
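+
+ // For example (hypothetical caller): _checkSync([kSyncBackoffNotMet])
+ // would let, e.g., a user-initiated sync proceed even while a server
+ // backoff is in effect, since that reason is explicitly ignored.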
+
+ async sync({ engines, why } = {}) {
+ let dateStr = Utils.formatTimestamp(new Date());
+ this._log.debug("User-Agent: " + Utils.userAgent);
+ await this.promiseInitialized;
+ this._log.info(
+ `Starting sync at ${dateStr} in browser session ${lazy.browserSessionID}`
+ );
+ return this._catch(async function () {
+ // Make sure we're logged in.
+ if (this._shouldLogin()) {
+ this._log.debug("In sync: should login.");
+ if (!(await this.login())) {
+ this._log.debug("Not syncing: login returned false.");
+ return;
+ }
+ } else {
+ this._log.trace("In sync: no need to login.");
+ }
+ await this._lockedSync(engines, why);
+ })();
+ },
+
+ /**
+ * Sync up engines with the server.
+ */
+ async _lockedSync(engineNamesToSync, why) {
+ return this._lock(
+ "service.js: sync",
+ this._notify("sync", JSON.stringify({ why }), async function onNotify() {
+ let histogram =
+ Services.telemetry.getHistogramById("WEAVE_START_COUNT");
+ histogram.add(1);
+
+ let synchronizer = new EngineSynchronizer(this);
+ await synchronizer.sync(engineNamesToSync, why); // Might throw!
+
+ histogram = Services.telemetry.getHistogramById(
+ "WEAVE_COMPLETE_SUCCESS_COUNT"
+ );
+ histogram.add(1);
+
+ // We successfully synchronized.
+ // Check if the identity wants to pre-fetch a migration sentinel from
+ // the server.
+ // If we have no clusterURL, we are probably doing a node reassignment
+ // so don't attempt to get it in that case.
+ if (this.clusterURL) {
+ this.identity.prefetchMigrationSentinel(this);
+ }
+
+ // Now let's update our declined engines
+ await this._maybeUpdateDeclined();
+ })
+ )();
+ },
+
+ /**
+ * Update the "declined" information in meta/global if necessary.
+ */
+ async _maybeUpdateDeclined() {
+ // If Sync failed due to no node we will not have a meta URL, so we can't
+ // update anything.
+ if (!this.metaURL) {
+ return;
+ }
+ let meta = await this.recordManager.get(this.metaURL);
+ if (!meta) {
+ this._log.warn("No meta/global; can't update declined state.");
+ return;
+ }
+
+ let declinedEngines = new DeclinedEngines(this);
+ let didChange = declinedEngines.updateDeclined(meta, this.engineManager);
+ if (!didChange) {
+ this._log.info(
+ "No change to declined engines. Not reuploading meta/global."
+ );
+ return;
+ }
+
+ await this.uploadMetaGlobal(meta);
+ },
+
+ /**
+ * Upload a fresh meta/global record
+ * @throws the response object if the upload request was not a success
+ */
+ async _uploadNewMetaGlobal() {
+ let meta = new WBORecord("meta", "global");
+ meta.payload.syncID = this.syncID;
+ meta.payload.storageVersion = STORAGE_VERSION;
+ meta.payload.declined = this.engineManager.getDeclined();
+ meta.modified = 0;
+ meta.isNew = true;
+
+ await this.uploadMetaGlobal(meta);
+ },
+
+ /**
+ * Upload meta/global, throwing the response on failure
+ * @param {WBORecord} meta meta/global record
+ * @throws the response object if the request was not a success
+ */
+ async uploadMetaGlobal(meta) {
+ this._log.debug("Uploading meta/global", meta);
+ let res = this.resource(this.metaURL);
+ res.setHeader("X-If-Unmodified-Since", meta.modified);
+ let response = await res.put(meta);
+ if (!response.success) {
+ throw response;
+ }
+ // From https://docs.services.mozilla.com/storage/apis-1.5.html:
+ // "Successful responses will return the new last-modified time for the collection."
+ meta.modified = response.obj;
+ this.recordManager.set(this.metaURL, meta);
+ },
+
+ /**
+ * Upload crypto/keys
+ * @param {WBORecord} cryptoKeys crypto/keys record
+ * @param {Number} lastModified known last modified timestamp (in decimal seconds),
+ * will be used to set the X-If-Unmodified-Since header
+ */
+ async _uploadCryptoKeys(cryptoKeys, lastModified) {
+ this._log.debug(`Uploading crypto/keys (lastModified: ${lastModified})`);
+ let res = this.resource(this.cryptoKeysURL);
+ res.setHeader("X-If-Unmodified-Since", lastModified);
+ return res.put(cryptoKeys);
+ },
+
+ async _freshStart() {
+ this._log.info("Fresh start. Resetting client.");
+ await this.resetClient();
+ this.collectionKeys.clear();
+
+ // Wipe the server.
+ await this.wipeServer();
+
+ // Upload a new meta/global record.
+ // _uploadNewMetaGlobal throws on failure -- including race conditions.
+ // If we got into a race condition, we'll abort the sync this way, too.
+ // That's fine. We'll just wait till the next sync. The client that we're
+ // racing is probably busy uploading stuff right now anyway.
+ await this._uploadNewMetaGlobal();
+
+ // Wipe everything we know about except meta because we just uploaded it
+ // TODO: there's a bug here. We should be calling resetClient, no?
+
+ // Generate, upload, and download new keys. Do this last so we don't wipe
+ // them...
+ await this.generateNewSymmetricKeys();
+ },
+
+ /**
+ * Wipe user data from the server.
+ *
+ * @param collections [optional]
+ * Array of collections to wipe. If not given, all collections are
+ * wiped by issuing a DELETE request for `storageURL`.
+ *
+ * @return the server's timestamp of the (last) DELETE.
+ */
+ async wipeServer(collections) {
+ let response;
+ let histogram = Services.telemetry.getHistogramById(
+ "WEAVE_WIPE_SERVER_SUCCEEDED"
+ );
+ if (!collections) {
+ // Strip the trailing slash.
+ let res = this.resource(this.storageURL.slice(0, -1));
+ res.setHeader("X-Confirm-Delete", "1");
+ try {
+ response = await res.delete();
+ } catch (ex) {
+ this._log.debug("Failed to wipe server", ex);
+ histogram.add(false);
+ throw ex;
+ }
+ if (response.status != 200 && response.status != 404) {
+ this._log.debug(
+ "Aborting wipeServer. Server responded with " +
+ response.status +
+ " response for " +
+ this.storageURL
+ );
+ histogram.add(false);
+ throw response;
+ }
+ histogram.add(true);
+ return response.headers["x-weave-timestamp"];
+ }
+
+ let timestamp;
+ for (let name of collections) {
+ let url = this.storageURL + name;
+ try {
+ response = await this.resource(url).delete();
+ } catch (ex) {
+ this._log.debug("Failed to wipe '" + name + "' collection", ex);
+ histogram.add(false);
+ throw ex;
+ }
+
+ if (response.status != 200 && response.status != 404) {
+ this._log.debug(
+ "Aborting wipeServer. Server responded with " +
+ response.status +
+ " response for " +
+ url
+ );
+ histogram.add(false);
+ throw response;
+ }
+
+ if ("x-weave-timestamp" in response.headers) {
+ timestamp = response.headers["x-weave-timestamp"];
+ }
+ }
+ histogram.add(true);
+ return timestamp;
+ },
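+
+ // For example (hypothetical call): `await service.wipeServer(["bookmarks"])`
+ // issues a DELETE for storage/bookmarks and returns the last
+ // x-weave-timestamp header the server sent.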
+
+ /**
+ * Wipe all local user data.
+ *
+ * @param engines [optional]
+ * Array of engine names to wipe. If not given, all engines are used.
+ */
+ async wipeClient(engines) {
+ // If we don't have any engines, reset the service and wipe all engines
+ if (!engines) {
+ // Clear out any service data
+ await this.resetService();
+
+ engines = [this.clientsEngine, ...this.engineManager.getAll()];
+ } else {
+ // Convert the array of names into engines
+ engines = this.engineManager.get(engines);
+ }
+
+ // Fully wipe each engine if it's able to decrypt data
+ for (let engine of engines) {
+ if (await engine.canDecrypt()) {
+ await engine.wipeClient();
+ }
+ }
+ },
+
+ /**
+ * Wipe all remote user data by wiping the server then telling each remote
+ * client to wipe itself.
+ *
+ * @param engines
+ * Array of engine names to wipe.
+ */
+ async wipeRemote(engines) {
+ try {
+ // Make sure stuff gets uploaded.
+ await this.resetClient(engines);
+
+ // Clear out any server data.
+ await this.wipeServer(engines);
+
+ // Only wipe the engines provided.
+ let extra = { reason: "wipe-remote" };
+ for (const e of engines) {
+ await this.clientsEngine.sendCommand("wipeEngine", [e], null, extra);
+ }
+
+ // Make sure the changed clients get updated.
+ await this.clientsEngine.sync();
+ } catch (ex) {
+ this.errorHandler.checkServerError(ex);
+ throw ex;
+ }
+ },
+
+ /**
+ * Reset local service information like logs, sync times, caches.
+ */
+ async resetService() {
+ return this._catch(async function reset() {
+ this._log.info("Service reset.");
+
+ // Pretend we've never synced to the server and drop cached data
+ this.syncID = "";
+ this.recordManager.clearCache();
+ })();
+ },
+
+ /**
+ * Reset the client by getting rid of any local server data and client data.
+ *
+ * @param engines [optional]
+ * Array of engine names to reset. If not given, all engines are used.
+ */
+ async resetClient(engines) {
+ return this._catch(async function doResetClient() {
+ // If we don't have any engines, reset everything including the service
+ if (!engines) {
+ // Clear out any service data
+ await this.resetService();
+
+ engines = [this.clientsEngine, ...this.engineManager.getAll()];
+ } else {
+ // Convert the array of names into engines
+ engines = this.engineManager.get(engines);
+ }
+
+ // Have each engine drop any temporary meta data
+ for (let engine of engines) {
+ await engine.resetClient();
+ }
+ })();
+ },
+
+ recordTelemetryEvent(object, method, value, extra = undefined) {
+ Svc.Obs.notify("weave:telemetry:event", { object, method, value, extra });
+ },
+};
+
+export var Service = new Sync11Service();
+Service.promiseInitialized = new Promise(resolve => {
+ Service.onStartup().then(resolve);
+});
diff --git a/services/sync/modules/stages/declined.sys.mjs b/services/sync/modules/stages/declined.sys.mjs
new file mode 100644
index 0000000000..2c74aab117
--- /dev/null
+++ b/services/sync/modules/stages/declined.sys.mjs
@@ -0,0 +1,78 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * This file contains code for maintaining the set of declined engines,
+ * in conjunction with EngineManager.
+ */
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+import { Observers } from "resource://services-common/observers.sys.mjs";
+
+export var DeclinedEngines = function (service) {
+ this._log = Log.repository.getLogger("Sync.Declined");
+ this._log.manageLevelFromPref("services.sync.log.logger.declined");
+
+ this.service = service;
+};
+
+DeclinedEngines.prototype = {
+ updateDeclined(meta, engineManager = this.service.engineManager) {
+ let enabled = new Set(engineManager.getEnabled().map(e => e.name));
+ let known = new Set(engineManager.getAll().map(e => e.name));
+ let remoteDeclined = new Set(meta.payload.declined || []);
+ let localDeclined = new Set(engineManager.getDeclined());
+
+ this._log.debug(
+ "Handling remote declined: " + JSON.stringify([...remoteDeclined])
+ );
+ this._log.debug(
+ "Handling local declined: " + JSON.stringify([...localDeclined])
+ );
+
+ // Any engines that are locally enabled should be removed from the remote
+ // declined list.
+ //
+ // Any engines that are locally declined should be added to the remote
+ // declined list.
+ let newDeclined = CommonUtils.union(
+ localDeclined,
+ CommonUtils.difference(remoteDeclined, enabled)
+ );
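+
+ // Worked example (hypothetical sets): remoteDeclined = {addons, prefs},
+ // enabled = {prefs}, localDeclined = {history} gives
+ // newDeclined = {history} + ({addons, prefs} - {prefs}) = {addons, history}.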
+
+ // If our declined set has changed, put it into the meta object and mark
+ // it as changed.
+ let declinedChanged = !CommonUtils.setEqual(newDeclined, remoteDeclined);
+ this._log.debug("Declined changed? " + declinedChanged);
+ if (declinedChanged) {
+ meta.changed = true;
+ meta.payload.declined = [...newDeclined];
+ }
+
+ // Update the engine manager regardless.
+ engineManager.setDeclined(newDeclined);
+
+ // Any engines that are locally known, locally disabled, and not remotely
+ // or locally declined, are candidates for enablement.
+ let undecided = CommonUtils.difference(
+ CommonUtils.difference(known, enabled),
+ newDeclined
+ );
+ if (undecided.size) {
+ let subject = {
+ declined: newDeclined,
+ enabled,
+ known,
+ undecided,
+ };
+ CommonUtils.nextTick(() => {
+ Observers.notify("weave:engines:notdeclined", subject);
+ });
+ }
+
+ return declinedChanged;
+ },
+};
diff --git a/services/sync/modules/stages/enginesync.sys.mjs b/services/sync/modules/stages/enginesync.sys.mjs
new file mode 100644
index 0000000000..190fd1f5fa
--- /dev/null
+++ b/services/sync/modules/stages/enginesync.sys.mjs
@@ -0,0 +1,412 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * This file contains code for synchronizing engines.
+ */
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import {
+ ABORT_SYNC_COMMAND,
+ LOGIN_FAILED_NETWORK_ERROR,
+ NO_SYNC_NODE_FOUND,
+ STATUS_OK,
+ SYNC_FAILED_PARTIAL,
+ SYNC_SUCCEEDED,
+ WEAVE_VERSION,
+ kSyncNetworkOffline,
+} from "resource://services-sync/constants.sys.mjs";
+
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+
+const lazy = {};
+ChromeUtils.defineESModuleGetters(lazy, {
+ Doctor: "resource://services-sync/doctor.sys.mjs",
+});
+
+/**
+ * Perform synchronization of engines.
+ *
+ * This was originally split out of service.js. The API needs lots of love.
+ */
+export function EngineSynchronizer(service) {
+ this._log = Log.repository.getLogger("Sync.Synchronizer");
+ this._log.manageLevelFromPref("services.sync.log.logger.synchronizer");
+
+ this.service = service;
+}
+
+EngineSynchronizer.prototype = {
+ async sync(engineNamesToSync, why) {
+ let fastSync = why && why == "sleep";
+ let startTime = Date.now();
+
+ this.service.status.resetSync();
+
+ // Make sure we should sync or record why we shouldn't.
+ let reason = this.service._checkSync();
+ if (reason) {
+ if (reason == kSyncNetworkOffline) {
+ this.service.status.sync = LOGIN_FAILED_NETWORK_ERROR;
+ }
+
+ // this is a purposeful abort rather than a failure, so don't set
+ // any status bits
+ reason = "Can't sync: " + reason;
+ throw new Error(reason);
+ }
+
+ // If we don't have a node, get one. If that fails, retry in 10 minutes.
+ if (
+ !this.service.clusterURL &&
+ !(await this.service.identity.setCluster())
+ ) {
+ this.service.status.sync = NO_SYNC_NODE_FOUND;
+ this._log.info("No cluster URL found. Cannot sync.");
+ return;
+ }
+
+ // Ping the server with a special info request once a day.
+ let infoURL = this.service.infoURL;
+ let now = Math.floor(Date.now() / 1000);
+ let lastPing = Svc.PrefBranch.getIntPref("lastPing", 0);
+ if (now - lastPing > 86400) {
+ // 60 * 60 * 24
+ infoURL += "?v=" + WEAVE_VERSION;
+ Svc.PrefBranch.setIntPref("lastPing", now);
+ }
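+
+ // For example, the once-a-day request becomes (hypothetical node URL):
+ // https://<node>/1.5/<uid>/info/collections?v=<WEAVE_VERSION>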
+
+ let engineManager = this.service.engineManager;
+
+ // Figure out what the last modified time is for each collection
+ let info = await this.service._fetchInfo(infoURL);
+
+ // Convert the response to an object and read out the modified times
+ for (let engine of [this.service.clientsEngine].concat(
+ engineManager.getAll()
+ )) {
+ engine.lastModified = info.obj[engine.name] || 0;
+ }
+
+ if (!(await this.service._remoteSetup(info, !fastSync))) {
+ throw new Error("Aborting sync, remote setup failed");
+ }
+
+ if (!fastSync) {
+ // Make sure we have an up-to-date list of clients before sending commands
+ this._log.debug("Refreshing client list.");
+ if (!(await this._syncEngine(this.service.clientsEngine))) {
+ // Clients is an engine like any other; it can fail with a 401,
+ // and we can elect to abort the sync.
+ this._log.warn("Client engine sync failed. Aborting.");
+ return;
+ }
+ }
+
+ // We only honor the "hint" of what engines to Sync if this isn't
+ // a first sync.
+ let allowEnginesHint = false;
+ // Wipe data in the desired direction if necessary
+ switch (Svc.PrefBranch.getStringPref("firstSync", null)) {
+ case "resetClient":
+ await this.service.resetClient(engineManager.enabledEngineNames);
+ break;
+ case "wipeClient":
+ await this.service.wipeClient(engineManager.enabledEngineNames);
+ break;
+ case "wipeRemote":
+ await this.service.wipeRemote(engineManager.enabledEngineNames);
+ break;
+ default:
+ allowEnginesHint = true;
+ break;
+ }
+
+ if (!fastSync && this.service.clientsEngine.localCommands) {
+ try {
+ if (!(await this.service.clientsEngine.processIncomingCommands())) {
+ this.service.status.sync = ABORT_SYNC_COMMAND;
+ throw new Error("Processed command aborted sync.");
+ }
+
+ // Repeat remoteSetup in case the commands forced us to reset
+ if (!(await this.service._remoteSetup(info))) {
+ throw new Error("Remote setup failed after processing commands.");
+ }
+ } finally {
+ // Always immediately attempt to push back the local client (now
+ // without commands).
+ // Note that we don't abort here; if there's a 401 because we've
+ // been reassigned, we'll handle it around another engine.
+ await this._syncEngine(this.service.clientsEngine);
+ }
+ }
+
+ // Update engines because it might change what we sync.
+ try {
+ await this._updateEnabledEngines();
+ } catch (ex) {
+ this._log.debug("Updating enabled engines failed", ex);
+ this.service.errorHandler.checkServerError(ex);
+ throw ex;
+ }
+
+ await this.service.engineManager.switchAlternatives();
+
+ // If the engines to sync have been specified, we sync in the order specified.
+ let enginesToSync;
+ if (allowEnginesHint && engineNamesToSync) {
+ this._log.info("Syncing specified engines", engineNamesToSync);
+ enginesToSync = engineManager
+ .get(engineNamesToSync)
+ .filter(e => e.enabled);
+ } else {
+ this._log.info("Syncing all enabled engines.");
+ enginesToSync = engineManager.getEnabled();
+ }
+ try {
+ // We don't bother validating engines that failed to sync.
+ let enginesToValidate = [];
+ for (let engine of enginesToSync) {
+ if (engine.shouldSkipSync(why)) {
+ this._log.info(`Engine ${engine.name} asked to be skipped`);
+ continue;
+ }
+ // If there's any problems with syncing the engine, report the failure
+ if (
+ !(await this._syncEngine(engine)) ||
+ this.service.status.enforceBackoff
+ ) {
+ this._log.info("Aborting sync for failure in " + engine.name);
+ break;
+ }
+ enginesToValidate.push(engine);
+ }
+
+ // If _syncEngine fails for a 401, we might not have a cluster URL here.
+ // If that's the case, break out of this immediately, rather than
+ // throwing an exception when trying to fetch metaURL.
+ if (!this.service.clusterURL) {
+ this._log.debug(
+ "Aborting sync, no cluster URL: not uploading new meta/global."
+ );
+ return;
+ }
+
+ // Upload meta/global if any engines changed anything.
+ let meta = await this.service.recordManager.get(this.service.metaURL);
+ if (meta.isNew || meta.changed) {
+ this._log.info("meta/global changed locally: reuploading.");
+ try {
+ await this.service.uploadMetaGlobal(meta);
+ delete meta.isNew;
+ delete meta.changed;
+ } catch (error) {
+ this._log.error(
+ "Unable to upload meta/global. Leaving marked as new."
+ );
+ }
+ }
+
+ if (!fastSync) {
+ await lazy.Doctor.consult(enginesToValidate);
+ }
+
+ // If there were no sync engine failures
+ if (this.service.status.service != SYNC_FAILED_PARTIAL) {
+ this.service.status.sync = SYNC_SUCCEEDED;
+ }
+
+ // Even if there were engine failures, bump lastSync even on partial since
+ // it's reflected in the UI (bug 1439777).
+ if (
+ this.service.status.service == SYNC_FAILED_PARTIAL ||
+ this.service.status.service == STATUS_OK
+ ) {
+ Svc.PrefBranch.setStringPref("lastSync", new Date().toString());
+ }
+ } finally {
+ Svc.PrefBranch.clearUserPref("firstSync");
+
+ let syncTime = ((Date.now() - startTime) / 1000).toFixed(2);
+ let dateStr = Utils.formatTimestamp(new Date());
+ this._log.info(
+ "Sync completed at " + dateStr + " after " + syncTime + " secs."
+ );
+ }
+ },
+
+ // Returns true if sync should proceed.
+ // false / no return value means sync should be aborted.
+ async _syncEngine(engine) {
+ try {
+ await engine.sync();
+ } catch (e) {
+ if (e.status == 401) {
+ // Maybe a 401, cluster update perhaps needed?
+ // We rely on ErrorHandler observing the sync failure notification to
+ // schedule another sync and clear node assignment values.
+ // Here we simply want to muffle the exception and return an
+ // appropriate value.
+ return false;
+ }
+ // Note that policies.js has already logged info about the exception...
+ if (Async.isShutdownException(e)) {
+ // Failure due to a shutdown exception should prevent other engines
+ // trying to start and immediately failing.
+ this._log.info(
+ `${engine.name} was interrupted by shutdown; no other engines will sync`
+ );
+ return false;
+ }
+ }
+
+ return true;
+ },
+
+ async _updateEnabledFromMeta(
+ meta,
+ numClients,
+ engineManager = this.service.engineManager
+ ) {
+ this._log.info("Updating enabled engines: " + numClients + " clients.");
+
+ if (meta.isNew || !meta.payload.engines) {
+ this._log.debug(
+ "meta/global isn't new, or is missing engines. Not updating enabled state."
+ );
+ return;
+ }
+
+ // If we're the only client, and no engines are marked as enabled,
+ // thumb our noses at the server data: it can't be right.
+ // Belt-and-suspenders approach to Bug 615926.
+ let hasEnabledEngines = false;
+ for (let e in meta.payload.engines) {
+ if (e != "clients") {
+ hasEnabledEngines = true;
+ break;
+ }
+ }
+
+ if (numClients <= 1 && !hasEnabledEngines) {
+ this._log.info(
+ "One client and no enabled engines: not touching local engine status."
+ );
+ return;
+ }
+
+ this.service._ignorePrefObserver = true;
+
+ let enabled = engineManager.enabledEngineNames;
+
+ let toDecline = new Set();
+ let toUndecline = new Set();
+
+ for (let engineName in meta.payload.engines) {
+ if (engineName == "clients") {
+ // Clients is special.
+ continue;
+ }
+ let index = enabled.indexOf(engineName);
+ if (index != -1) {
+ // The engine is enabled locally. Nothing to do.
+ enabled.splice(index, 1);
+ continue;
+ }
+ let engine = engineManager.get(engineName);
+ if (!engine) {
+ // The engine doesn't exist locally. Nothing to do.
+ continue;
+ }
+
+ let attemptedEnable = false;
+ // If the engine was enabled remotely, enable it locally.
+ if (
+ !Svc.PrefBranch.getBoolPref(
+ "engineStatusChanged." + engine.prefName,
+ false
+ )
+ ) {
+ this._log.trace(
+ "Engine " + engineName + " was enabled. Marking as non-declined."
+ );
+ toUndecline.add(engineName);
+ this._log.trace(engineName + " engine was enabled remotely.");
+ engine.enabled = true;
+ // Note that setting engine.enabled to true might not have worked for
+ // the password engine if a master-password is enabled. However, it's
+ // still OK that we added it to undeclined - the user *tried* to enable
+ // it remotely - so it still winds up as not being flagged as declined
+ // even though it's disabled remotely.
+ attemptedEnable = true;
+ }
+
+ // If either the engine was disabled locally or enabling the engine
+ // failed (see above re master-password) then wipe server data and
+ // disable it everywhere.
+ if (!engine.enabled) {
+ this._log.trace("Wiping data for " + engineName + " engine.");
+ await engine.wipeServer();
+ delete meta.payload.engines[engineName];
+ meta.changed = true; // the new enabled state must propagate
+ // We also here mark the engine as declined, because the pref
+ // was explicitly changed to false - unless we tried, and failed,
+ // to enable it - in which case we leave the declined state alone.
+ if (!attemptedEnable) {
+ // This will be reflected in meta/global in the next stage.
+ this._log.trace(
+ "Engine " +
+ engineName +
+ " was disabled locally. Marking as declined."
+ );
+ toDecline.add(engineName);
+ }
+ }
+ }
+
+ // Any remaining engines were either enabled locally or disabled remotely.
+ for (let engineName of enabled) {
+ let engine = engineManager.get(engineName);
+ if (
+ Svc.PrefBranch.getBoolPref(
+ "engineStatusChanged." + engine.prefName,
+ false
+ )
+ ) {
+ this._log.trace("The " + engineName + " engine was enabled locally.");
+ toUndecline.add(engineName);
+ } else {
+ this._log.trace("The " + engineName + " engine was disabled remotely.");
+
+ // Don't automatically mark it as declined!
+ try {
+ engine.enabled = false;
+ } catch (e) {
+ this._log.trace("Failed to disable engine " + engineName);
+ }
+ }
+ }
+
+ engineManager.decline(toDecline);
+ engineManager.undecline(toUndecline);
+
+ for (const pref of Svc.PrefBranch.getChildList("engineStatusChanged.")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ this.service._ignorePrefObserver = false;
+ },
+
+ async _updateEnabledEngines() {
+ let meta = await this.service.recordManager.get(this.service.metaURL);
+ let numClients = this.service.scheduler.numClients;
+ let engineManager = this.service.engineManager;
+
+ await this._updateEnabledFromMeta(meta, numClients, engineManager);
+ },
+};
+Object.freeze(EngineSynchronizer.prototype);
diff --git a/services/sync/modules/status.sys.mjs b/services/sync/modules/status.sys.mjs
new file mode 100644
index 0000000000..429dbda7b6
--- /dev/null
+++ b/services/sync/modules/status.sys.mjs
@@ -0,0 +1,135 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import {
+ CLIENT_NOT_CONFIGURED,
+ ENGINE_SUCCEEDED,
+ LOGIN_FAILED,
+ LOGIN_FAILED_NO_PASSPHRASE,
+ LOGIN_FAILED_NO_USERNAME,
+ LOGIN_SUCCEEDED,
+ STATUS_OK,
+ SYNC_FAILED,
+ SYNC_FAILED_PARTIAL,
+ SYNC_SUCCEEDED,
+} from "resource://services-sync/constants.sys.mjs";
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { SyncAuthManager } from "resource://services-sync/sync_auth.sys.mjs";
+
+export var Status = {
+ _log: Log.repository.getLogger("Sync.Status"),
+ __authManager: null,
+ ready: false,
+
+ get _authManager() {
+ if (this.__authManager) {
+ return this.__authManager;
+ }
+ this.__authManager = new SyncAuthManager();
+ return this.__authManager;
+ },
+
+ get service() {
+ return this._service;
+ },
+
+ set service(code) {
+ this._log.debug(
+ "Status.service: " + (this._service || undefined) + " => " + code
+ );
+ this._service = code;
+ },
+
+ get login() {
+ return this._login;
+ },
+
+ set login(code) {
+ this._log.debug("Status.login: " + this._login + " => " + code);
+ this._login = code;
+
+ if (
+ code == LOGIN_FAILED_NO_USERNAME ||
+ code == LOGIN_FAILED_NO_PASSPHRASE
+ ) {
+ this.service = CLIENT_NOT_CONFIGURED;
+ } else if (code != LOGIN_SUCCEEDED) {
+ this.service = LOGIN_FAILED;
+ } else {
+ this.service = STATUS_OK;
+ }
+ },
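+
+ // For example, assigning LOGIN_FAILED_NO_USERNAME above also moves
+ // Status.service to CLIENT_NOT_CONFIGURED, while LOGIN_SUCCEEDED restores
+ // STATUS_OK.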
+
+ get sync() {
+ return this._sync;
+ },
+
+ set sync(code) {
+ this._log.debug("Status.sync: " + this._sync + " => " + code);
+ this._sync = code;
+ this.service = code == SYNC_SUCCEEDED ? STATUS_OK : SYNC_FAILED;
+ },
+
+ get engines() {
+ return this._engines;
+ },
+
+ set engines([name, code]) {
+ this._log.debug("Status for engine " + name + ": " + code);
+ this._engines[name] = code;
+
+ if (code != ENGINE_SUCCEEDED) {
+ this.service = SYNC_FAILED_PARTIAL;
+ }
+ },
+
+ // Implement toString because adding a logger introduces a cyclic object
+ // value, so we can't trivially debug-print Status as JSON.
+ toString: function toString() {
+ return (
+ "<Status" +
+ ": login: " +
+ Status.login +
+ ", service: " +
+ Status.service +
+ ", sync: " +
+ Status.sync +
+ ">"
+ );
+ },
+
+ checkSetup: function checkSetup() {
+ if (!this._authManager.username) {
+ Status.login = LOGIN_FAILED_NO_USERNAME;
+ Status.service = CLIENT_NOT_CONFIGURED;
+ } else if (Status.login == STATUS_OK) {
+ Status.service = STATUS_OK;
+ }
+ return Status.service;
+ },
+
+ resetBackoff: function resetBackoff() {
+ this.enforceBackoff = false;
+ this.backoffInterval = 0;
+ this.minimumNextSync = 0;
+ },
+
+ resetSync: function resetSync() {
+ // Logger setup.
+ this._log.manageLevelFromPref("services.sync.log.logger.status");
+
+ this._log.info("Resetting Status.");
+ this.service = STATUS_OK;
+ this._login = LOGIN_SUCCEEDED;
+ this._sync = SYNC_SUCCEEDED;
+ this._engines = {};
+ this.partial = false;
+ },
+};
+
+// Initialize various status values.
+Status.resetBackoff();
+Status.resetSync();
diff --git a/services/sync/modules/sync_auth.sys.mjs b/services/sync/modules/sync_auth.sys.mjs
new file mode 100644
index 0000000000..6b8da4061c
--- /dev/null
+++ b/services/sync/modules/sync_auth.sys.mjs
@@ -0,0 +1,655 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+import { TokenServerClient } from "resource://services-common/tokenserverclient.sys.mjs";
+import { CryptoUtils } from "resource://services-crypto/utils.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import {
+ LOGIN_FAILED_LOGIN_REJECTED,
+ LOGIN_FAILED_NETWORK_ERROR,
+ LOGIN_FAILED_NO_USERNAME,
+ LOGIN_SUCCEEDED,
+ MASTER_PASSWORD_LOCKED,
+ STATUS_OK,
+} from "resource://services-sync/constants.sys.mjs";
+
+const lazy = {};
+
+// Lazy imports to prevent unnecessary load on startup.
+ChromeUtils.defineESModuleGetters(lazy, {
+ BulkKeyBundle: "resource://services-sync/keys.sys.mjs",
+ Weave: "resource://services-sync/main.sys.mjs",
+});
+
+ChromeUtils.defineLazyGetter(lazy, "fxAccounts", () => {
+ return ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+ ).getFxAccountsSingleton();
+});
+
+ChromeUtils.defineLazyGetter(lazy, "log", function () {
+ let log = Log.repository.getLogger("Sync.SyncAuthManager");
+ log.manageLevelFromPref("services.sync.log.logger.identity");
+ return log;
+});
+
+XPCOMUtils.defineLazyPreferenceGetter(
+ lazy,
+ "IGNORE_CACHED_AUTH_CREDENTIALS",
+ "services.sync.debug.ignoreCachedAuthCredentials"
+);
+
+// FxAccountsCommon.js doesn't use a "namespace", so create one here.
+import * as fxAccountsCommon from "resource://gre/modules/FxAccountsCommon.sys.mjs";
+
+const SCOPE_OLD_SYNC = fxAccountsCommon.SCOPE_OLD_SYNC;
+
+const OBSERVER_TOPICS = [
+ fxAccountsCommon.ONLOGIN_NOTIFICATION,
+ fxAccountsCommon.ONVERIFIED_NOTIFICATION,
+ fxAccountsCommon.ONLOGOUT_NOTIFICATION,
+ fxAccountsCommon.ON_ACCOUNT_STATE_CHANGE_NOTIFICATION,
+ "weave:connected",
+];
+
+/*
+ General authentication error for abstracting authentication
+ errors from multiple sources (e.g., from FxAccounts, TokenServer).
+ details is additional details about the error - it might be a string, or
+ some other error object (which should do the right thing when toString() is
+ called on it)
+*/
+export function AuthenticationError(details, source) {
+ this.details = details;
+ this.source = source;
+}
+
+AuthenticationError.prototype = {
+ toString() {
+ return "AuthenticationError(" + this.details + ")";
+ },
+};
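+
+// For example, a tokenserver 401 is wrapped later in this file as
+// `new AuthenticationError(err, "tokenserver")`.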
+
+// The `SyncAuthManager` coordinates access authorization to the Sync server.
+// Its job is essentially to get us from having a signed-in Firefox Accounts user,
+// to knowing the user's sync storage node and having the necessary short-lived
+// credentials in order to access it.
+
+export function SyncAuthManager() {
+ // NOTE: _fxaService and _tokenServerClient are replaced with mocks by
+ // the test suite.
+ this._fxaService = lazy.fxAccounts;
+ this._tokenServerClient = new TokenServerClient();
+ this._tokenServerClient.observerPrefix = "weave:service";
+ this._log = lazy.log;
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_username",
+ "services.sync.username"
+ );
+
+ this.asyncObserver = Async.asyncObserver(this, lazy.log);
+ for (let topic of OBSERVER_TOPICS) {
+ Services.obs.addObserver(this.asyncObserver, topic);
+ }
+}
+
+SyncAuthManager.prototype = {
+ _fxaService: null,
+ _tokenServerClient: null,
+ // https://docs.services.mozilla.com/token/apis.html
+ _token: null,
+ // protection against the user changing underneath us - the uid
+ // of the current user.
+ _userUid: null,
+
+ hashedUID() {
+ const id = this._fxaService.telemetry.getSanitizedUID();
+ if (!id) {
+ throw new Error("hashedUID: Don't seem to have previously seen a token");
+ }
+ return id;
+ },
+
+ // Return a hashed version of a deviceID, suitable for telemetry.
+ hashedDeviceID(deviceID) {
+ const id = this._fxaService.telemetry.sanitizeDeviceId(deviceID);
+ if (!id) {
+ throw new Error("hashedUID: Don't seem to have previously seen a token");
+ }
+ return id;
+ },
+
+ // The "node type" reported to telemetry or null if not specified.
+ get telemetryNodeType() {
+ return this._token && this._token.node_type ? this._token.node_type : null;
+ },
+
+ finalize() {
+ // After this is called, we can expect Service.identity != this.
+ for (let topic of OBSERVER_TOPICS) {
+ Services.obs.removeObserver(this.asyncObserver, topic);
+ }
+ this.resetCredentials();
+ this._userUid = null;
+ },
+
+ async getSignedInUser() {
+ let data = await this._fxaService.getSignedInUser();
+ if (!data) {
+ this._userUid = null;
+ return null;
+ }
+ if (this._userUid == null) {
+ this._userUid = data.uid;
+ } else if (this._userUid != data.uid) {
+ throw new Error("The signed in user has changed");
+ }
+ return data;
+ },
+
+ logout() {
+ // This will be called when sync fails (or when the account is being
+ // unlinked etc). It may have failed because we got a 401 from a sync
+ // server, so we nuke the token. Next time sync runs and wants an
+ // authentication header, we will notice the lack of the token and fetch a
+ // new one.
+ this._token = null;
+ },
+
+ async observe(subject, topic, data) {
+ this._log.debug("observed " + topic);
+ if (!this.username) {
+ this._log.info("Sync is not configured, so ignoring the notification");
+ return;
+ }
+ switch (topic) {
+ case "weave:connected":
+ case fxAccountsCommon.ONLOGIN_NOTIFICATION: {
+ this._log.info("Sync has been connected to a logged in user");
+ this.resetCredentials();
+ let accountData = await this.getSignedInUser();
+
+ if (!accountData.verified) {
+ // wait for a verified notification before we kick sync off.
+ this._log.info("The user is not verified");
+ break;
+ }
+ }
+ // Intentional fall-through - we've been configured with an already
+ // verified user.
+ case fxAccountsCommon.ONVERIFIED_NOTIFICATION: {
+ this._log.info("The user became verified");
+ lazy.Weave.Status.login = LOGIN_SUCCEEDED;
+
+ // And actually sync. If we've never synced before, we force a full sync.
+ // If we have, then we are probably just reauthenticating so it's a normal sync.
+ // We can use any pref that must be set if we've synced before, and check
+ // the sync lock state because we might already be doing that first sync.
+ let isFirstSync =
+ !lazy.Weave.Service.locked &&
+ !Svc.PrefBranch.getStringPref("client.syncID", null);
+ if (isFirstSync) {
+ this._log.info("Doing initial sync actions");
+ Svc.PrefBranch.setStringPref("firstSync", "resetClient");
+ Services.obs.notifyObservers(null, "weave:service:setup-complete");
+ }
+ // There's no need to wait for sync to complete and it would deadlock
+ // our AsyncObserver.
+ if (!Svc.PrefBranch.getBoolPref("testing.tps", false)) {
+ lazy.Weave.Service.sync({ why: "login" });
+ }
+ break;
+ }
+
+ case fxAccountsCommon.ONLOGOUT_NOTIFICATION:
+ lazy.Weave.Service.startOver()
+ .then(() => {
+ this._log.trace("startOver completed");
+ })
+ .catch(err => {
+ this._log.warn("Failed to reset sync", err);
+ });
+ // startOver will cause this instance to be thrown away, so there's
+ // nothing else to do.
+ break;
+
+ case fxAccountsCommon.ON_ACCOUNT_STATE_CHANGE_NOTIFICATION:
+ // throw away token forcing us to fetch a new one later.
+ this.resetCredentials();
+ break;
+ }
+ },
+
+ /**
+ * Provide override point for testing token expiration.
+ */
+ _now() {
+ return this._fxaService._internal.now();
+ },
+
+ get _localtimeOffsetMsec() {
+ return this._fxaService._internal.localtimeOffsetMsec;
+ },
+
+ get syncKeyBundle() {
+ return this._syncKeyBundle;
+ },
+
+ get username() {
+ return this._username;
+ },
+
+ /**
+ * Setting the username is a legacy operation that is no longer
+ * supported - this setter exists only to throw if anything tries it.
+ */
+ set username(value) {
+ // setting .username is an old throwback, but it should no longer happen.
+ throw new Error("don't set the username");
+ },
+
+ /**
+ * Resets all calculated credentials we hold for the current user. This will
+ * *not* force the user to reauthenticate, but instead will force us to
+ * calculate a new key bundle, fetch a new token, etc.
+ */
+ resetCredentials() {
+ this._syncKeyBundle = null;
+ this._token = null;
+ // The cluster URL comes from the token, so resetting it to empty will
+ // force Sync to not accidentally use a value from an earlier token.
+ lazy.Weave.Service.clusterURL = null;
+ },
+
+ /**
+ * Pre-fetches any information that might help with migration away from this
+ * identity. Called after every sync and is really just an optimization that
+ * allows us to avoid a network request for when we actually need the
+ * migration info.
+ */
+ prefetchMigrationSentinel(service) {
+ // nothing to do here until we decide to migrate away from FxA.
+ },
+
+ /**
+ * Verify the current auth state, unlocking the master-password if necessary.
+ *
+ * Returns a promise that resolves with the current auth state after
+ * attempting to unlock.
+ */
+ async unlockAndVerifyAuthState() {
+ let data = await this.getSignedInUser();
+ const fxa = this._fxaService;
+ if (!data) {
+ lazy.log.debug("unlockAndVerifyAuthState has no FxA user");
+ return LOGIN_FAILED_NO_USERNAME;
+ }
+ if (!this.username) {
+ lazy.log.debug(
+ "unlockAndVerifyAuthState finds that sync isn't configured"
+ );
+ return LOGIN_FAILED_NO_USERNAME;
+ }
+ if (!data.verified) {
+ // Treat not verified as if the user needs to re-auth, so the browser
+ // UI reflects the state.
+ lazy.log.debug("unlockAndVerifyAuthState has an unverified user");
+ return LOGIN_FAILED_LOGIN_REJECTED;
+ }
+ if (await fxa.keys.canGetKeyForScope(SCOPE_OLD_SYNC)) {
+ lazy.log.debug(
+ "unlockAndVerifyAuthState already has (or can fetch) sync keys"
+ );
+ return STATUS_OK;
+ }
+ // so no keys - ensure MP unlocked.
+ if (!Utils.ensureMPUnlocked()) {
+ // user declined to unlock, so we don't know if they are stored there.
+ lazy.log.debug(
+ "unlockAndVerifyAuthState: user declined to unlock master-password"
+ );
+ return MASTER_PASSWORD_LOCKED;
+ }
+ // If we still can't get keys it probably means the user authenticated
+ // without unlocking the MP or cleared the saved logins, so we've now
+ // lost them - the user will need to reauth before continuing.
+ let result;
+ if (await fxa.keys.canGetKeyForScope(SCOPE_OLD_SYNC)) {
+ result = STATUS_OK;
+ } else {
+ result = LOGIN_FAILED_LOGIN_REJECTED;
+ }
+ lazy.log.debug(
+ "unlockAndVerifyAuthState re-fetched credentials and is returning",
+ result
+ );
+ return result;
+ },
+
+ /**
+ * Do we have a non-null, not yet expired token for the user currently
+ * signed in?
+ */
+ _hasValidToken() {
+ // If pref is set to ignore cached authentication credentials for debugging,
+ // then return false to force the fetching of a new token.
+ if (lazy.IGNORE_CACHED_AUTH_CREDENTIALS) {
+ return false;
+ }
+ if (!this._token) {
+ return false;
+ }
+ if (this._token.expiration < this._now()) {
+ return false;
+ }
+ return true;
+ },
+
+ // Get our tokenServerURL - a private helper. Returns a string.
+ get _tokenServerUrl() {
+ // We used to support services.sync.tokenServerURI but this was a
+ // pain-point for people using non-default servers as Sync may auto-reset
+ // all services.sync prefs. So if that still exists, it wins.
+ let url = Svc.PrefBranch.getStringPref("tokenServerURI", null); // Svc.PrefBranch "root" is services.sync
+ if (!url) {
+ url = Services.prefs.getStringPref("identity.sync.tokenserver.uri");
+ }
+ while (url.endsWith("/")) {
+ // trailing slashes cause problems...
+ url = url.slice(0, -1);
+ }
+ return url;
+ },
+
+ // Refresh the sync token for our user. Returns a promise that resolves
+ // with a token, or rejects with an error.
+ async _fetchTokenForUser() {
+ const fxa = this._fxaService;
+ // We need keys for things to work. If we don't have them, just
+ // return null for the token - sync calling unlockAndVerifyAuthState()
+ // before actually syncing will setup the error states if necessary.
+ if (!(await fxa.keys.canGetKeyForScope(SCOPE_OLD_SYNC))) {
+ this._log.info(
+ "Unable to fetch keys (master-password locked?), so aborting token fetch"
+ );
+ throw new Error("Can't fetch a token as we can't get keys");
+ }
+
+ // Do the token dance, with a retry in case of transient auth failure.
+ // We need to prove that we know the sync key in order to get a token
+ // from the tokenserver.
+ let getToken = async key => {
+ this._log.info("Getting a sync token from", this._tokenServerUrl);
+ let token = await this._fetchTokenUsingOAuth(key);
+ this._log.trace("Successfully got a token");
+ return token;
+ };
+
+ try {
+ let token, key;
+ try {
+ this._log.info("Getting sync key");
+ key = await fxa.keys.getKeyForScope(SCOPE_OLD_SYNC);
+ if (!key) {
+ throw new Error("browser does not have the sync key, cannot sync");
+ }
+ token = await getToken(key);
+ } catch (err) {
+ // If we get a 401 fetching the token it may be that our auth tokens needed
+ // to be regenerated; retry exactly once.
+ if (!err.response || err.response.status !== 401) {
+ throw err;
+ }
+ this._log.warn(
+ "Token server returned 401, retrying token fetch with fresh credentials"
+ );
+ key = await fxa.keys.getKeyForScope(SCOPE_OLD_SYNC);
+ token = await getToken(key);
+ }
+ // Use only 80% of the token's duration, so we refresh it before it
+ // actually expires; otherwise we may briefly enter a "needs
+ // reauthentication" state due to sync storage errors.
+ // (XXX - those errors may no longer occur - someone should check ;)
+ token.expiration = this._now() + token.duration * 1000 * 0.8;
+ if (!this._syncKeyBundle) {
+ this._syncKeyBundle = lazy.BulkKeyBundle.fromJWK(key);
+ }
+ lazy.Weave.Status.login = LOGIN_SUCCEEDED;
+ this._token = token;
+ return token;
+ } catch (caughtErr) {
+ let err = caughtErr; // The error we will rethrow.
+
+ // TODO: unify these errors - we need to handle errors thrown by
+ // both tokenserverclient and hawkclient.
+ // A tokenserver error thrown based on a bad response.
+ if (err.response && err.response.status === 401) {
+ err = new AuthenticationError(err, "tokenserver");
+ // A hawkclient error.
+ } else if (err.code && err.code === 401) {
+ err = new AuthenticationError(err, "hawkclient");
+ // An FxAccounts.sys.mjs error.
+ } else if (err.message == fxAccountsCommon.ERROR_AUTH_ERROR) {
+ err = new AuthenticationError(err, "fxaccounts");
+ }
+
+ // TODO: write tests to make sure that different auth error cases are handled here
+ // properly: auth error getting oauth token, auth error getting sync token (invalid
+ // generation or client-state error)
+ if (err instanceof AuthenticationError) {
+ this._log.error("Authentication error in _fetchTokenForUser", err);
+ // set it to the "fatal" LOGIN_FAILED_LOGIN_REJECTED reason.
+ lazy.Weave.Status.login = LOGIN_FAILED_LOGIN_REJECTED;
+ } else {
+ this._log.error("Non-authentication error in _fetchTokenForUser", err);
+ // for now assume it is just a transient network related problem
+ // (although sadly, it might also be a regular unhandled exception)
+ lazy.Weave.Status.login = LOGIN_FAILED_NETWORK_ERROR;
+ }
+ throw err;
+ }
+ },
+
+ /**
+ * Generates an OAuth access_token using the OLD_SYNC scope and exchanges it
+ * for a TokenServer token.
+ *
+ * @returns {Promise}
+ * @private
+ */
+ async _fetchTokenUsingOAuth(key) {
+ this._log.debug("Getting a token using OAuth");
+ const fxa = this._fxaService;
+ const ttl = fxAccountsCommon.OAUTH_TOKEN_FOR_SYNC_LIFETIME_SECONDS;
+ const accessToken = await fxa.getOAuthToken({ scope: SCOPE_OLD_SYNC, ttl });
+ const headers = {
+ "X-KeyId": key.kid,
+ };
+
+ return this._tokenServerClient
+ .getTokenUsingOAuth(this._tokenServerUrl, accessToken, headers)
+ .catch(async err => {
+ if (err.response && err.response.status === 401) {
+ // remove the cached token if we cannot authorize with it.
+ // we have to do this here because we know which `token` to remove
+ // from cache.
+ await fxa.removeCachedOAuthToken({ token: accessToken });
+ }
+
+ // continue the error chain, so other handlers can deal with the error.
+ throw err;
+ });
+ },
+
+ // Returns a promise that is resolved with a valid token for the current
+ // user, or rejects if one can't be obtained.
+ // NOTE: This does all the authentication for Sync - it both sets the
+ // key bundle (ie, decryption keys) and does the token fetch. These 2
+ // concepts could be decoupled, but there doesn't seem any value in that
+ // currently.
+ async _ensureValidToken(forceNewToken = false) {
+ let signedInUser = await this.getSignedInUser();
+ if (!signedInUser) {
+ throw new Error("no user is logged in");
+ }
+ if (!signedInUser.verified) {
+ throw new Error("user is not verified");
+ }
+
+ await this.asyncObserver.promiseObserversComplete();
+
+ if (!forceNewToken && this._hasValidToken()) {
+ this._log.trace("_ensureValidToken already has one");
+ return this._token;
+ }
+
+ // We are going to grab a new token - re-use the same promise if we are
+ // already fetching one.
+ if (!this._ensureValidTokenPromise) {
+ this._ensureValidTokenPromise = this.__ensureValidToken().finally(() => {
+ this._ensureValidTokenPromise = null;
+ });
+ }
+ return this._ensureValidTokenPromise;
+ },
+
+ async __ensureValidToken() {
+ // reset this._token as a safety net to reduce the possibility of us
+ // repeatedly attempting to use an invalid token if _fetchTokenForUser throws.
+ this._token = null;
+ try {
+ let token = await this._fetchTokenForUser();
+ this._token = token;
+ // This is a little bit of a hack. The tokenserver tells us an HMACed version
+ // of the FxA uid which we can use for metrics purposes without revealing the
+ // user's true uid. It conceptually belongs to FxA but we get it from tokenserver
+ // for legacy reasons. Hand it back to the FxA client code to deal with.
+ this._fxaService.telemetry._setHashedUID(token.hashed_fxa_uid);
+ return token;
+ } finally {
+ Services.obs.notifyObservers(null, "weave:service:login:got-hashed-id");
+ }
+ },
+
+ getResourceAuthenticator() {
+ return this._getAuthenticationHeader.bind(this);
+ },
+
+ /**
+ * @return a Hawk HTTP Authorization Header, lightly wrapped, for the .uri
+ * of a RESTRequest or AsyncResponse object.
+ */
+ async _getAuthenticationHeader(httpObject, method) {
+ // Note that in failure states we return null, causing the request to be
+ // made without authorization headers, thereby presumably causing a 401,
+ // which causes Sync to log out. If we throw, this may not happen as
+ // expected.
+ try {
+ await this._ensureValidToken();
+ } catch (ex) {
+ this._log.error("Failed to fetch a token for authentication", ex);
+ return null;
+ }
+ if (!this._token) {
+ return null;
+ }
+ let credentials = { id: this._token.id, key: this._token.key };
+ method = method || httpObject.method;
+
+ // Get the local clock offset from the Firefox Accounts server. This should
+ // be close to the offset from the storage server.
+ let options = {
+ now: this._now(),
+ localtimeOffsetMsec: this._localtimeOffsetMsec,
+ credentials,
+ };
+
+ let headerValue = await CryptoUtils.computeHAWK(
+ httpObject.uri,
+ method,
+ options
+ );
+ return { headers: { authorization: headerValue.field } };
+ },
+
+ /**
+ * Determine the cluster for the current user and update state.
+ * Returns true if a new cluster URL was found and it is different from
+ * the existing cluster URL, false otherwise.
+ */
+ async setCluster() {
+ // Make sure we didn't get some unexpected response for the cluster.
+ let cluster = await this._findCluster();
+ this._log.debug("Cluster value = " + cluster);
+ if (cluster == null) {
+ return false;
+ }
+
+ // Convert from the funky "String object with additional properties" that
+ // resource.js returns to a plain-old string.
+ cluster = cluster.toString();
+ // Don't update stuff if we already have the right cluster
+ if (cluster == lazy.Weave.Service.clusterURL) {
+ return false;
+ }
+
+ this._log.debug("Setting cluster to " + cluster);
+ lazy.Weave.Service.clusterURL = cluster;
+
+ return true;
+ },
+
+ async _findCluster() {
+ try {
+ // Ensure we are ready to authenticate and have a valid token.
+ // We need to handle node reassignment here. If we are being asked
+ // for a clusterURL while the service already has a clusterURL, then
+ // it's likely a 401 was received using the existing token - in which
+ // case we just discard the existing token and fetch a new one.
+ let forceNewToken = false;
+ if (lazy.Weave.Service.clusterURL) {
+ this._log.debug(
+ "_findCluster has a pre-existing clusterURL, so fetching a new token token"
+ );
+ forceNewToken = true;
+ }
+ let token = await this._ensureValidToken(forceNewToken);
+ let endpoint = token.endpoint;
+ // For Sync 1.5 storage endpoints, we use the base endpoint verbatim.
+ // However, it should end in "/" because we will extend it with
+ // well known path components. So we add a "/" if it's missing.
+ if (!endpoint.endsWith("/")) {
+ endpoint += "/";
+ }
+ this._log.debug("_findCluster returning " + endpoint);
+ return endpoint;
+ } catch (err) {
+ this._log.info("Failed to fetch the cluster URL", err);
+ // service.js's verifyLogin() method will attempt to fetch a cluster
+ // URL when it sees a 401. If it gets null, it treats it as a "real"
+ // auth error and sets Status.login to LOGIN_FAILED_LOGIN_REJECTED, which
+ // in turn causes a notification bar to appear informing the user they
+ // need to re-authenticate.
+ // On the other hand, if fetching the cluster URL fails with an exception,
+ // verifyLogin() assumes it is a transient error, and thus doesn't show
+ // the notification bar under the assumption the issue will resolve
+ // itself.
+ // Thus:
+ // * On a real 401, we must return null.
+ // * On any other problem we must let an exception bubble up.
+ if (err instanceof AuthenticationError) {
+ return null;
+ }
+ throw err;
+ }
+ },
+};
diff --git a/services/sync/modules/telemetry.sys.mjs b/services/sync/modules/telemetry.sys.mjs
new file mode 100644
index 0000000000..c08f405b0e
--- /dev/null
+++ b/services/sync/modules/telemetry.sys.mjs
@@ -0,0 +1,1279 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Support for Sync-and-FxA-related telemetry, which is submitted in a special-purpose
+// telemetry ping called the "sync ping", documented here:
+//
+// ../../../toolkit/components/telemetry/docs/data/sync-ping.rst
+//
+// The sync ping contains identifiers that are linked to the user's Firefox Account
+// and are separate from the main telemetry client_id, so this file is also responsible
+// for ensuring that we can delete those pings upon user request, by plumbing its
+// identifiers into the "deletion-request" ping.
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ Async: "resource://services-common/async.sys.mjs",
+ AuthenticationError: "resource://services-sync/sync_auth.sys.mjs",
+ FxAccounts: "resource://gre/modules/FxAccounts.sys.mjs",
+ ObjectUtils: "resource://gre/modules/ObjectUtils.sys.mjs",
+ Observers: "resource://services-common/observers.sys.mjs",
+ Resource: "resource://services-sync/resource.sys.mjs",
+ Status: "resource://services-sync/status.sys.mjs",
+ Svc: "resource://services-sync/util.sys.mjs",
+ TelemetryController: "resource://gre/modules/TelemetryController.sys.mjs",
+ TelemetryEnvironment: "resource://gre/modules/TelemetryEnvironment.sys.mjs",
+ TelemetryUtils: "resource://gre/modules/TelemetryUtils.sys.mjs",
+ Weave: "resource://services-sync/main.sys.mjs",
+});
+
+ChromeUtils.defineLazyGetter(lazy, "fxAccounts", () => {
+ return ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+ ).getFxAccountsSingleton();
+});
+
+import * as constants from "resource://services-sync/constants.sys.mjs";
+
+ChromeUtils.defineLazyGetter(
+ lazy,
+ "WeaveService",
+ () => Cc["@mozilla.org/weave/service;1"].getService().wrappedJSObject
+);
+const log = Log.repository.getLogger("Sync.Telemetry");
+
+const TOPICS = [
+ // For tracking change to account/device identifiers.
+ "fxaccounts:new_device_id",
+ "fxaccounts:onlogout",
+ "weave:service:ready",
+ "weave:service:login:got-hashed-id",
+
+ // For whole-of-sync metrics.
+ "weave:service:sync:start",
+ "weave:service:sync:finish",
+ "weave:service:sync:error",
+
+ // For individual engine metrics.
+ "weave:engine:sync:start",
+ "weave:engine:sync:finish",
+ "weave:engine:sync:error",
+ "weave:engine:sync:applied",
+ "weave:engine:sync:step",
+ "weave:engine:sync:uploaded",
+ "weave:engine:validate:finish",
+ "weave:engine:validate:error",
+
+ // For ad-hoc telemetry events.
+ "weave:telemetry:event",
+ "weave:telemetry:histogram",
+ "fxa:telemetry:event",
+
+ "weave:telemetry:migration",
+];
+
+const PING_FORMAT_VERSION = 1;
+
+const EMPTY_UID = "0".repeat(32);
+
+// The set of engines we record telemetry for - any other engines are ignored.
+const ENGINES = new Set([
+ "addons",
+ "bookmarks",
+ "clients",
+ "forms",
+ "history",
+ "passwords",
+ "prefs",
+ "tabs",
+ "extension-storage",
+ "addresses",
+ "creditcards",
+]);
+
+function tryGetMonotonicTimestamp() {
+ try {
+ return Services.telemetry.msSinceProcessStart();
+ } catch (e) {
+ log.warn("Unable to get a monotonic timestamp!");
+ return -1;
+ }
+}
+
+function timeDeltaFrom(monotonicStartTime) {
+ let now = tryGetMonotonicTimestamp();
+ if (monotonicStartTime !== -1 && now !== -1) {
+ return Math.round(now - monotonicStartTime);
+ }
+ return -1;
+}
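+
+// e.g. timeDeltaFrom(tryGetMonotonicTimestamp()) taken around an operation
+// yields its duration rounded to whole ms, or -1 if monotonic time is
+// unavailable.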
+
+const NS_ERROR_MODULE_BASE_OFFSET = 0x45;
+const NS_ERROR_MODULE_NETWORK = 6;
+
+// A reimplementation of NS_ERROR_GET_MODULE, which surprisingly doesn't seem
+// to exist anywhere in .js code in a way that can be reused.
+// This is taken from DownloadCore.sys.mjs.
+function NS_ERROR_GET_MODULE(code) {
+ return ((code & 0x7fff0000) >> 16) - NS_ERROR_MODULE_BASE_OFFSET;
+}
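+
+// For example, a necko failure code such as 0x804b000a gives
+// ((0x804b000a & 0x7fff0000) >> 16) == 0x4b, and 0x4b - 0x45 == 6, i.e.
+// NS_ERROR_MODULE_NETWORK.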
+
+// Converts extra integer fields to strings, rounds floats to three
+// decimal places (microsecond precision for millisecond timings), and
+// removes profile directory paths and URLs from potential error messages.
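+// e.g. { took: 1.23456, count: 3 } becomes { took: "1.235", count: "3" }.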
+function normalizeExtraTelemetryFields(extra) {
+ let result = {};
+ for (let key in extra) {
+ let value = extra[key];
+ let type = typeof value;
+ if (type == "string") {
+ result[key] = ErrorSanitizer.cleanErrorMessage(value);
+ } else if (type == "number") {
+ result[key] = Number.isInteger(value)
+ ? value.toString(10)
+ : value.toFixed(3);
+ } else if (type != "undefined") {
+ throw new TypeError(
+ `Invalid type ${type} for extra telemetry field ${key}`
+ );
+ }
+ }
+ return lazy.ObjectUtils.isEmpty(result) ? undefined : result;
+}
+
+// Keeps track of the fate of individual records during a sync cycle.
+// The main reason this is a class is so it can track individual records'
+// failure reasons without huge memory overhead.
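+// Illustrative usage: engines create one instance per sync and call
+// addIncomingFailedReason(err) / addOutgoingFailedReason(err) as individual
+// records fail; the per-reason counts end up in the engine's ping data.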
+export class SyncedRecordsTelemetry {
+ // applied => number of items that should be applied.
+ // failed => number of items that failed in this sync.
+ // newFailed => number of items that failed for the first time in this sync.
+ // reconciled => number of items that were reconciled.
+ // failedReasons => {name, count} of reasons a record failed
+ incomingCounts = {
+ applied: 0,
+ failed: 0,
+ newFailed: 0,
+ reconciled: 0,
+ failedReasons: null,
+ };
+ outgoingCounts = { failed: 0, sent: 0, failedReasons: null };
+
+ addIncomingFailedReason(reason) {
+ if (!this.incomingCounts.failedReasons) {
+ this.incomingCounts.failedReasons = [];
+ }
+ let transformedReason = SyncTelemetry.transformError(reason);
+ // Some errors like http/nss errors don't have an error object
+ // those will be caught by the higher level telemetry
+ if (!transformedReason.error) {
+ return;
+ }
+
+ let index = this.incomingCounts.failedReasons.findIndex(
+ reasons => reasons.name === transformedReason.error
+ );
+
+ if (index >= 0) {
+ this.incomingCounts.failedReasons[index].count += 1;
+ } else {
+ this.incomingCounts.failedReasons.push({
+ name: transformedReason.error,
+ count: 1,
+ });
+ }
+ }
+
+ addOutgoingFailedReason(reason) {
+ if (!this.outgoingCounts.failedReasons) {
+ this.outgoingCounts.failedReasons = [];
+ }
+ let transformedReason = SyncTelemetry.transformError(reason);
+ // Some errors like http/nss errors don't have an error object;
+ // those will be caught by the higher level telemetry.
+ if (!transformedReason.error) {
+ return;
+ }
+ let index = this.outgoingCounts.failedReasons.findIndex(
+ reasons => reasons.name === transformedReason.error
+ );
+ if (index >= 0) {
+ this.outgoingCounts.failedReasons[index].count += 1;
+ } else {
+ this.outgoingCounts.failedReasons.push({
+ name: transformedReason.error,
+ count: 1,
+ });
+ }
+ }
+}
+
+// The `ErrorSanitizer` has 2 main jobs:
+// * Remove PII from errors, such as the profile dir or URLs the user might
+// have visited.
+// * Normalize errors so different locales or operating systems etc don't
+// generate different messages for the same underlying error.
+// * [TODO] Normalize errors so environmental factors don't influence message.
+// For example, timestamps or GUIDs should be replaced with something static.
+export class ErrorSanitizer {
+ // Things we normalize - this isn't exhaustive, but covers the common error messages we see.
+ // Eg:
+ // > Win error 112 during operation write on file [profileDir]\weave\addonsreconciler.json (Espacio en disco insuficiente. )
+ // > Win error 112 during operation write on file [profileDir]\weave\addonsreconciler.json (Diskte yeterli yer yok. )
+ // > <snip many other translations of the error>
+ // > Unix error 28 during operation write on file [profileDir]/weave/addonsreconciler.json (No space left on device)
+ // These tend to crowd out other errors we might care about (eg, top 5 errors for some engines are
+ // variations of the "no space left on device")
+
+// Note that only errors with same-but-differently-worded Windows and Unix variants are here - we
+ // still sanitize ones that aren't in these maps to remove the translations etc - eg,
+ // `ERROR_SHARING_VIOLATION` doesn't really have a unix equivalent, so doesn't appear here, but
+ // we still strip the translations to avoid the variants.
+ static E_PERMISSION_DENIED = "OS error [Permission denied]";
+ static E_NO_FILE_OR_DIR = "OS error [File/Path not found]";
+
+ static DOMErrorSubstitutions = {
+ NotFoundError: this.E_NO_FILE_OR_DIR,
+ NotAllowedError: this.E_PERMISSION_DENIED,
+ };
+
+ static #cleanOSErrorMessage(message, error = undefined) {
+ if (DOMException.isInstance(error)) {
+ const sub = this.DOMErrorSubstitutions[error.name];
+ message = message.replaceAll("\\", "/");
+ if (sub) {
+ return `${sub} ${message}`;
+ }
+ }
+
+ return message;
+ }
+
+ // A regex we can use to replace the profile dir in error messages. We use a
+ // regexp so we can simply replace all case-insensitive occurrences.
+ // This escaping function is from:
+ // https://developer.mozilla.org/en/docs/Web/JavaScript/Guide/Regular_Expressions
+ static reProfileDir = new RegExp(
+ PathUtils.profileDir.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"),
+ "gi"
+ );
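+ // e.g. (hypothetical path) "error writing /home/me/.mozilla/firefox/abc.default/weave/foo.json"
+ // becomes "error writing [profileDir]/weave/foo.json".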
+
+ /**
+ * Clean an error message, removing PII and normalizing OS-specific messages.
+ *
+ * @param {string} message The error message
+ * @param {Error?} error The error class instance, if any.
+ */
+ static cleanErrorMessage(message, error = undefined) {
+ // There's a chance the profiledir is in the error string which is PII we
+ // want to avoid including in the ping.
+ message = message.replace(this.reProfileDir, "[profileDir]");
+ // MSG_INVALID_URL from /dom/bindings/Errors.msg -- no way to access this
+ // directly from JS.
+ if (message.endsWith("is not a valid URL.")) {
+ message = "<URL> is not a valid URL.";
+ }
+ // Try to filter things that look somewhat like a URL (in that they contain a
+ // colon in the middle of non-whitespace), in case anything else is including
+ // these in error messages. Note that JSON.stringified stuff comes through
+ // here, so we explicitly ignore double-quotes as well.
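+ // e.g. "GET https://example.com/storage failed" becomes "GET <URL> failed".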
+ message = message.replace(/[^\s"]+:[^\s"]+/g, "<URL>");
+
+ // Where an error already contains a normalized "<guid: ...>" marker, strip
+ // the specific value so these errors are easier to aggregate.
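+ // e.g. "failed to apply <guid: abc123>" becomes "failed to apply <GUID>".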
+ message = message.replace(/<guid: ([^>]+)>/g, "<GUID>");
+
+ return this.#cleanOSErrorMessage(message, error);
+ }
+}
+
+// This function validates the payload of a telemetry "event" - this can be
+// removed once there are APIs available for the telemetry modules to collect
+// these events (bug 1329530) - but for now we simulate that planned API as
+// best we can.
+function validateTelemetryEvent(eventDetails) {
+ let { object, method, value, extra } = eventDetails;
+ // Do basic validation of the params - everything except "extra" must
+ // be a string. method and object are required.
+ if (
+ typeof method != "string" ||
+ typeof object != "string" ||
+ (value && typeof value != "string") ||
+ (extra && typeof extra != "object")
+ ) {
+ log.warn("Invalid event parameters - wrong types", eventDetails);
+ return false;
+ }
+ // length checks.
+ if (
+ method.length > 20 ||
+ object.length > 20 ||
+ (value && value.length > 80)
+ ) {
+ log.warn("Invalid event parameters - wrong lengths", eventDetails);
+ return false;
+ }
+
+ // extra can be falsey, or an object with string names and values.
+ if (extra) {
+ if (Object.keys(extra).length > 10) {
+ log.warn("Invalid event parameters - too many extra keys", eventDetails);
+ return false;
+ }
+ for (let [ename, evalue] of Object.entries(extra)) {
+ if (
+ typeof ename != "string" ||
+ ename.length > 15 ||
+ typeof evalue != "string" ||
+ evalue.length > 85
+ ) {
+ log.warn(
+ `Invalid event parameters: extra item "${ename}" is invalid`,
+ eventDetails
+ );
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+class EngineRecord {
+ constructor(name) {
+ // startTime is in ms from process start, but is monotonic (unlike Date.now())
+ // so we need to keep both it and when.
+ this.startTime = tryGetMonotonicTimestamp();
+ this.name = name;
+
+ // This allows cases like bookmarks-buffered to have a separate name from
+ // the bookmarks engine.
+ let engineImpl = lazy.Weave.Service.engineManager.get(name);
+ if (engineImpl && engineImpl.overrideTelemetryName) {
+ this.overrideTelemetryName = engineImpl.overrideTelemetryName;
+ }
+ }
+
+ toJSON() {
+ let result = { name: this.overrideTelemetryName || this.name };
+ let properties = [
+ "took",
+ "status",
+ "failureReason",
+ "incoming",
+ "outgoing",
+ "validation",
+ "steps",
+ ];
+ for (let property of properties) {
+ result[property] = this[property];
+ }
+ return result;
+ }
+
+ finished(error) {
+ let took = timeDeltaFrom(this.startTime);
+ if (took > 0) {
+ this.took = took;
+ }
+ if (error) {
+ this.failureReason = SyncTelemetry.transformError(error);
+ }
+ }
+
+ recordApplied(counts) {
+ if (this.incoming) {
+ log.error(
+ `Incoming records applied multiple times for engine ${this.name}!`
+ );
+ return;
+ }
+ if (this.name === "clients" && !counts.failed) {
+ // ignore successful application of client records
+ // since otherwise they show up every time and are meaningless.
+ return;
+ }
+
+ let incomingData = {};
+
+ if (counts.failedReasons) {
+ // sort the failed reasons in desc by count, then take top 10
+ counts.failedReasons = counts.failedReasons
+ .sort((a, b) => b.count - a.count)
+ .slice(0, 10);
+ }
+ // Counts has extra stuff used for logging, but we only care about a few
+ let properties = ["applied", "failed", "failedReasons"];
+ // Only record non-zero properties and only record incoming at all if
+ // there's at least one property we care about.
+ for (let property of properties) {
+ if (counts[property]) {
+ incomingData[property] = counts[property];
+ this.incoming = incomingData;
+ }
+ }
+ }
+
+ recordStep(stepResult) {
+ let step = {
+ name: stepResult.name,
+ };
+ if (stepResult.took > 0) {
+ step.took = Math.round(stepResult.took);
+ }
+ if (stepResult.counts) {
+ let counts = stepResult.counts.filter(({ count }) => count > 0);
+ if (counts.length) {
+ step.counts = counts;
+ }
+ }
+ if (this.steps) {
+ this.steps.push(step);
+ } else {
+ this.steps = [step];
+ }
+ }
+
+ recordValidation(validationResult) {
+ if (this.validation) {
+ log.error(`Multiple validations occurred for engine ${this.name}!`);
+ return;
+ }
+ let { problems, version, took, checked } = validationResult;
+ let validation = {
+ version: version || 0,
+ checked: checked || 0,
+ };
+ if (took > 0) {
+ validation.took = Math.round(took);
+ }
+ let summarized = problems.filter(({ count }) => count > 0);
+ if (summarized.length) {
+ validation.problems = summarized;
+ }
+ this.validation = validation;
+ }
+
+ recordValidationError(e) {
+ if (this.validation) {
+ log.error(`Multiple validations occurred for engine ${this.name}!`);
+ return;
+ }
+
+ this.validation = {
+ failureReason: SyncTelemetry.transformError(e),
+ };
+ }
+
+ recordUploaded(counts) {
+ if (counts.sent || counts.failed) {
+ if (!this.outgoing) {
+ this.outgoing = [];
+ }
+ if (counts.failedReasons) {
+ // sort the failed reasons in desc by count, then take top 10
+ counts.failedReasons = counts.failedReasons
+ .sort((a, b) => b.count - a.count)
+ .slice(0, 10);
+ }
+ this.outgoing.push({
+ sent: counts.sent || undefined,
+ failed: counts.failed || undefined,
+ failedReasons: counts.failedReasons || undefined,
+ });
+ }
+ }
+}
+
+// The record of a single "sync" - typically many of these are submitted in
+// a single ping (ie, as a 'syncs' array)
+export class SyncRecord {
+ constructor(allowedEngines, why) {
+ this.allowedEngines = allowedEngines;
+ // Our failure reason. This property only exists in the generated ping if an
+ // error actually occurred.
+ this.failureReason = undefined;
+ this.syncNodeType = null;
+ this.when = Date.now();
+ this.startTime = tryGetMonotonicTimestamp();
+ this.took = 0; // will be set later.
+ this.why = why;
+
+ // All engines that have finished (ie, does not include the "current" one)
+ // We omit this from the ping if it's empty.
+ this.engines = [];
+ // The engine that has started but not yet stopped.
+ this.currentEngine = null;
+ }
+
+ toJSON() {
+ let result = {
+ when: this.when,
+ took: this.took,
+ failureReason: this.failureReason,
+ status: this.status,
+ };
+ if (this.why) {
+ result.why = this.why;
+ }
+ let engines = [];
+ for (let engine of this.engines) {
+ engines.push(engine.toJSON());
+ }
+ if (engines.length) {
+ result.engines = engines;
+ }
+ return result;
+ }
+
+ finished(error) {
+ this.took = timeDeltaFrom(this.startTime);
+ if (this.currentEngine != null) {
+ log.error(
+ "Finished called for the sync before the current engine finished"
+ );
+ this.currentEngine.finished(null);
+ this.onEngineStop(this.currentEngine.name);
+ }
+ if (error) {
+ this.failureReason = SyncTelemetry.transformError(error);
+ }
+
+ this.syncNodeType = lazy.Weave.Service.identity.telemetryNodeType;
+
+ // Check for engine statuses. -- We do this now, and not in engine.finished
+ // to make sure any statuses that get set "late" are recorded
+ for (let engine of this.engines) {
+ let status = lazy.Status.engines[engine.name];
+ if (status && status !== constants.ENGINE_SUCCEEDED) {
+ engine.status = status;
+ }
+ }
+
+ let statusObject = {};
+
+ let serviceStatus = lazy.Status.service;
+ if (serviceStatus && serviceStatus !== constants.STATUS_OK) {
+ statusObject.service = serviceStatus;
+ this.status = statusObject;
+ }
+ let syncStatus = lazy.Status.sync;
+ if (syncStatus && syncStatus !== constants.SYNC_SUCCEEDED) {
+ statusObject.sync = syncStatus;
+ this.status = statusObject;
+ }
+ }
+
+ onEngineStart(engineName) {
+ if (this._shouldIgnoreEngine(engineName, false)) {
+ return;
+ }
+
+ if (this.currentEngine) {
+ log.error(
+ `Being told that engine ${engineName} has started, but current engine ${this.currentEngine.name} hasn't stopped`
+ );
+ // Just discard the current engine rather than making up data for it.
+ }
+ this.currentEngine = new EngineRecord(engineName);
+ }
+
+ onEngineStop(engineName, error) {
+ // We only care if it's the current engine if we have a current engine.
+ if (this._shouldIgnoreEngine(engineName, !!this.currentEngine)) {
+ return;
+ }
+ if (!this.currentEngine) {
+ // It's possible for us to get an error before the start message of an engine
+ // (somehow), in which case we still want to record that error.
+ if (!error) {
+ return;
+ }
+ log.error(
+ `Error triggered on ${engineName} when no current engine exists: ${error}`
+ );
+ this.currentEngine = new EngineRecord(engineName);
+ }
+ this.currentEngine.finished(error);
+ this.engines.push(this.currentEngine);
+ this.currentEngine = null;
+ }
+
+ onEngineApplied(engineName, counts) {
+ if (this._shouldIgnoreEngine(engineName)) {
+ return;
+ }
+ this.currentEngine.recordApplied(counts);
+ }
+
+ onEngineStep(engineName, step) {
+ if (this._shouldIgnoreEngine(engineName)) {
+ return;
+ }
+ this.currentEngine.recordStep(step);
+ }
+
+ onEngineValidated(engineName, validationData) {
+ if (this._shouldIgnoreEngine(engineName, false)) {
+ return;
+ }
+ let engine = this.engines.find(e => e.name === engineName);
+ if (
+ !engine &&
+ this.currentEngine &&
+ engineName === this.currentEngine.name
+ ) {
+ engine = this.currentEngine;
+ }
+ if (engine) {
+ engine.recordValidation(validationData);
+ } else {
+ log.warn(
+ `Validation event triggered for engine ${engineName}, which hasn't been synced!`
+ );
+ }
+ }
+
+ onEngineValidateError(engineName, error) {
+ if (this._shouldIgnoreEngine(engineName, false)) {
+ return;
+ }
+ let engine = this.engines.find(e => e.name === engineName);
+ if (
+ !engine &&
+ this.currentEngine &&
+ engineName === this.currentEngine.name
+ ) {
+ engine = this.currentEngine;
+ }
+ if (engine) {
+ engine.recordValidationError(error);
+ } else {
+ log.warn(
+ `Validation failure event triggered for engine ${engineName}, which hasn't been synced!`
+ );
+ }
+ }
+
+ onEngineUploaded(engineName, counts) {
+ if (this._shouldIgnoreEngine(engineName)) {
+ return;
+ }
+ this.currentEngine.recordUploaded(counts);
+ }
+
+ _shouldIgnoreEngine(engineName, shouldBeCurrent = true) {
+ if (!this.allowedEngines.has(engineName)) {
+ log.info(
+ `Notification for engine ${engineName}, but we aren't recording telemetry for it`
+ );
+ return true;
+ }
+ if (shouldBeCurrent) {
+ if (!this.currentEngine || engineName != this.currentEngine.name) {
+ log.info(`Notification for engine ${engineName} but it isn't current`);
+ return true;
+ }
+ }
+ return false;
+ }
+}
+
+// The entire "sync ping" - it includes all the syncs, events etc recorded in
+// the ping.
+class SyncTelemetryImpl {
+ constructor(allowedEngines) {
+ log.manageLevelFromPref("services.sync.log.logger.telemetry");
+ // This is accessible so we can enable custom engines during tests.
+ this.allowedEngines = allowedEngines;
+ this.current = null;
+ this.setupObservers();
+
+ this.payloads = [];
+ this.discarded = 0;
+ this.events = [];
+ this.histograms = {};
+ this.migrations = [];
+ this.maxEventsCount = lazy.Svc.PrefBranch.getIntPref(
+ "telemetry.maxEventsCount",
+ 1000
+ );
+ this.maxPayloadCount = lazy.Svc.PrefBranch.getIntPref(
+ "telemetry.maxPayloadCount"
+ );
+ this.submissionInterval =
+ lazy.Svc.PrefBranch.getIntPref("telemetry.submissionInterval") * 1000;
+ this.lastSubmissionTime = Services.telemetry.msSinceProcessStart();
+ this.lastUID = EMPTY_UID;
+ this.lastSyncNodeType = null;
+ this.currentSyncNodeType = null;
+ // Note that the sessionStartDate is somewhat arbitrary - the telemetry
+ // modules themselves just use `new Date()`. This means that our startDate
+ // isn't going to be the same as the sessionStartDate in the main pings,
+ // but that's OK for now - if it's a problem we'd need to change the
+ // telemetry modules to expose what it thinks the sessionStartDate is.
+ let sessionStartDate = new Date();
+ this.sessionStartDate = lazy.TelemetryUtils.toLocalTimeISOString(
+ lazy.TelemetryUtils.truncateToHours(sessionStartDate)
+ );
+ lazy.TelemetryController.registerSyncPingShutdown(() => this.shutdown());
+ }
+
+ sanitizeFxaDeviceId(deviceId) {
+ return lazy.fxAccounts.telemetry.sanitizeDeviceId(deviceId);
+ }
+
+ prepareFxaDevices(devices) {
+ // For non-sync users, the data per device is limited -- just an id and a
+ // type (and not even the id yet). For sync users, if we can correctly map
+ // the fxaDevice to a sync device, then we can get os and version info,
+ // which would be quite unfortunate to lose.
+ let extraInfoMap = new Map();
+ if (this.syncIsEnabled()) {
+ for (let client of this.getClientsEngineRecords()) {
+ if (client.fxaDeviceId) {
+ extraInfoMap.set(client.fxaDeviceId, {
+ os: client.os,
+ version: client.version,
+ syncID: this.sanitizeFxaDeviceId(client.id),
+ });
+ }
+ }
+ }
+ // Finally, sanitize and convert to the proper format.
+ return devices.map(d => {
+ let { os, version, syncID } = extraInfoMap.get(d.id) || {
+ os: undefined,
+ version: undefined,
+ syncID: undefined,
+ };
+ return {
+ id: this.sanitizeFxaDeviceId(d.id) || EMPTY_UID,
+ type: d.type,
+ os,
+ version,
+ syncID,
+ };
+ });
+ }
+
+ syncIsEnabled() {
+ return lazy.WeaveService.enabled && lazy.WeaveService.ready;
+ }
+
+ // Separate for testing.
+ getClientsEngineRecords() {
+ if (!this.syncIsEnabled()) {
+ throw new Error("Bug: syncIsEnabled() must be true, check it first");
+ }
+ return lazy.Weave.Service.clientsEngine.remoteClients;
+ }
+
+ updateFxaDevices(devices) {
+ if (!devices) {
+ return {};
+ }
+ let me = devices.find(d => d.isCurrentDevice);
+ let id = me ? this.sanitizeFxaDeviceId(me.id) : undefined;
+ let cleanDevices = this.prepareFxaDevices(devices);
+ return { deviceID: id, devices: cleanDevices };
+ }
+
+ getFxaDevices() {
+ return lazy.fxAccounts.device.recentDeviceList;
+ }
+
+ getPingJSON(reason) {
+ let { devices, deviceID } = this.updateFxaDevices(this.getFxaDevices());
+ return {
+ os: lazy.TelemetryEnvironment.currentEnvironment.system.os,
+ why: reason,
+ devices,
+ discarded: this.discarded || undefined,
+ version: PING_FORMAT_VERSION,
+ syncs: this.payloads.slice(),
+ uid: this.lastUID,
+ syncNodeType: this.lastSyncNodeType || undefined,
+ deviceID,
+ sessionStartDate: this.sessionStartDate,
+ events: !this.events.length ? undefined : this.events,
+ migrations: !this.migrations.length ? undefined : this.migrations,
+ histograms: !Object.keys(this.histograms).length
+ ? undefined
+ : this.histograms,
+ };
+ }
+
+ _addMigrationRecord(type, info) {
+ log.debug("Saw telemetry migration info", type, info);
+ // Updates to this need to be documented in `sync-ping.rst`
+ switch (type) {
+ case "webext-storage":
+ this.migrations.push({
+ type: "webext-storage",
+ entries: +info.entries,
+ entriesSuccessful: +info.entries_successful,
+ extensions: +info.extensions,
+ extensionsSuccessful: +info.extensions_successful,
+ openFailure: !!info.open_failure,
+ });
+ break;
+ default:
+ throw new Error("Bug: Unknown migration record type " + type);
+ }
+ }
+
+ finish(reason) {
+ // Note that we might be in the middle of a sync right now, and so we don't
+ // want to touch this.current.
+ let result = this.getPingJSON(reason);
+ this.payloads = [];
+ this.discarded = 0;
+ this.events = [];
+ this.migrations = [];
+ this.histograms = {};
+ this.submit(result);
+ }
+
+ setupObservers() {
+ for (let topic of TOPICS) {
+ lazy.Observers.add(topic, this, this);
+ }
+ }
+
+ shutdown() {
+ this.finish("shutdown");
+ for (let topic of TOPICS) {
+ lazy.Observers.remove(topic, this, this);
+ }
+ }
+
+ submit(record) {
+ if (!this.isProductionSyncUser()) {
+ return false;
+ }
+ // We still call submit() with possibly illegal payloads so that tests can
+ // know that the ping was built. We don't end up submitting them, however.
+ let numEvents = record.events ? record.events.length : 0;
+ let numMigrations = record.migrations ? record.migrations.length : 0;
+ if (record.syncs.length || numEvents || numMigrations) {
+ log.trace(
+ `submitting ${record.syncs.length} sync record(s) and ` +
+ `${numEvents} event(s) to telemetry`
+ );
+ lazy.TelemetryController.submitExternalPing("sync", record, {
+ usePingSender: true,
+ }).catch(err => {
+ log.error("failed to submit ping", err);
+ });
+ return true;
+ }
+ return false;
+ }
+
+ isProductionSyncUser() {
+ // If FxA isn't production then we treat sync as not being production.
+ // Further, there's the deprecated "services.sync.tokenServerURI" pref we
+ // need to consider - fxa doesn't consider that pref, so if it's the only
+ // pref set, they *are* running a production fxa, just not production sync.
+ if (
+ !lazy.FxAccounts.config.isProductionConfig() ||
+ Services.prefs.prefHasUserValue("services.sync.tokenServerURI")
+ ) {
+ log.trace(`Not sending telemetry ping for self-hosted Sync user`);
+ return false;
+ }
+ return true;
+ }
+
+ onSyncStarted(data) {
+ const why = data && JSON.parse(data).why;
+ if (this.current) {
+ log.warn(
+ "Observed weave:service:sync:start, but we're already recording a sync!"
+ );
+ // Just discard the old record, consistent with our handling of engines, above.
+ this.current = null;
+ }
+ this.current = new SyncRecord(this.allowedEngines, why);
+ }
+
+ // We need to ensure that the telemetry `deletion-request` ping always contains the user's
+ // current sync device ID, because if the user opts out of telemetry then the deletion ping
+ // will be immediately triggered for sending, and we won't have a chance to fill it in later.
+ // This keeps the `deletion-ping` up-to-date when the user's account state changes.
+ onAccountInitOrChange() {
+ // We don't submit sync pings for self-hosters, so don't need to collect their device ids either.
+ if (!this.isProductionSyncUser()) {
+ return;
+ }
+ // Awkwardly async, but no need to await. If the user's account state changes while
+ // this promise is in flight, it will reject and we won't record any data in the ping.
+ // (And a new notification will trigger us to try again with the new state).
+ lazy.fxAccounts.device
+ .getLocalId()
+ .then(deviceId => {
+ let sanitizedDeviceId =
+ lazy.fxAccounts.telemetry.sanitizeDeviceId(deviceId);
+ // In the past we did not persist the FxA metrics identifiers to disk,
+ // so this might be missing until we can fetch it from the server for the
+ // first time. A fresh notification will be triggered when it's available.
+ if (sanitizedDeviceId) {
+ // Sanitized device ids are 64 characters long, but telemetry limits scalar strings to 50.
+ // The first 32 chars are sufficient to uniquely identify the device, so just send those.
+ // It's hard to change the sync ping itself to only send 32 chars, for b/w compat reasons.
+ sanitizedDeviceId = sanitizedDeviceId.substr(0, 32);
+ Services.telemetry.scalarSet(
+ "deletion.request.sync_device_id",
+ sanitizedDeviceId
+ );
+ }
+ })
+ .catch(err => {
+ log.warn(
+ `Failed to set sync identifiers in the deletion-request ping: ${err}`
+ );
+ });
+ }
+
+ // This keeps the `deletion-request` ping up-to-date when the user signs out,
+ // clearing the now-nonexistent sync device id.
+ onAccountLogout() {
+ Services.telemetry.scalarSet("deletion.request.sync_device_id", "");
+ }
+
+ _checkCurrent(topic) {
+ if (!this.current) {
+ // This is only `info` because it happens when we do a tabs "quick-write"
+ log.info(
+ `Observed notification ${topic} but no current sync is being recorded.`
+ );
+ return false;
+ }
+ return true;
+ }
+
+ _shouldSubmitForDataChange() {
+ let newID = lazy.fxAccounts.telemetry.getSanitizedUID() || EMPTY_UID;
+ let oldID = this.lastUID;
+ if (
+ newID != EMPTY_UID &&
+ oldID != EMPTY_UID &&
+ // Both are "real" uids, so we care if they've changed.
+ newID != oldID
+ ) {
+ log.trace(
+ `shouldSubmitForDataChange - uid from '${oldID}' -> '${newID}'`
+ );
+ return true;
+ }
+ // We've gone from knowing one of the ids to not knowing it (which we
+ // ignore) or we've gone from not knowing it to knowing it (which is fine).
+ // Now check the node type because a change there also means we should
+ // submit.
+ if (
+ this.lastSyncNodeType &&
+ this.currentSyncNodeType != this.lastSyncNodeType
+ ) {
+ log.trace(
+ `shouldSubmitForDataChange - nodeType from '${this.lastSyncNodeType}' -> '${this.currentSyncNodeType}'`
+ );
+ return true;
+ }
+ log.trace("shouldSubmitForDataChange - no need to submit");
+ return false;
+ }
+
+ maybeSubmitForDataChange() {
+ if (this._shouldSubmitForDataChange()) {
+ log.info(
+ "Early submission of sync telemetry due to changed IDs/NodeType"
+ );
+ this.finish("idchange"); // this actually submits.
+ this.lastSubmissionTime = Services.telemetry.msSinceProcessStart();
+ }
+
+ // Only update the last UIDs if we actually know them.
+ let current_uid = lazy.fxAccounts.telemetry.getSanitizedUID();
+ if (current_uid) {
+ this.lastUID = current_uid;
+ }
+ if (this.currentSyncNodeType) {
+ this.lastSyncNodeType = this.currentSyncNodeType;
+ }
+ }
+
+ maybeSubmitForInterval() {
+ // We want to submit the ping every `this.submissionInterval` but only when
+ // there's no current sync in progress, otherwise we may end up submitting
+ // the sync and the events caused by it in different pings.
+ if (
+ this.current == null &&
+ Services.telemetry.msSinceProcessStart() - this.lastSubmissionTime >
+ this.submissionInterval
+ ) {
+ this.finish("schedule");
+ this.lastSubmissionTime = Services.telemetry.msSinceProcessStart();
+ }
+ }
+
+ onSyncFinished(error) {
+ if (!this.current) {
+ log.warn("onSyncFinished but we aren't recording");
+ return;
+ }
+ this.current.finished(error);
+ this.currentSyncNodeType = this.current.syncNodeType;
+ let current = this.current;
+ this.current = null;
+ this.takeTelemetryRecord(current);
+ }
+
+ takeTelemetryRecord(record) {
+ // We check for "data change" before appending the current sync to payloads,
+ // as it is the current sync which carries the new data, and thus
+ // must go in the *next* submission.
+ this.maybeSubmitForDataChange();
+ if (this.payloads.length < this.maxPayloadCount) {
+ this.payloads.push(record.toJSON());
+ } else {
+ ++this.discarded;
+ }
+ // If we are submitting due to timing, it's desirable that the most recent
+ // sync is included, so we check after appending the record.
+ this.maybeSubmitForInterval();
+ }
+
+ _addHistogram(hist) {
+ let histogram = Services.telemetry.getHistogramById(hist);
+ let s = histogram.snapshot();
+ this.histograms[hist] = s;
+ }
+
+ _recordEvent(eventDetails) {
+ this.maybeSubmitForDataChange();
+
+ if (this.events.length >= this.maxEventsCount) {
+ log.warn("discarding event - already queued our maximum", eventDetails);
+ return;
+ }
+
+ let { object, method, value, extra } = eventDetails;
+ if (extra) {
+ extra = normalizeExtraTelemetryFields(extra);
+ eventDetails = { object, method, value, extra };
+ }
+
+ if (!validateTelemetryEvent(eventDetails)) {
+ // we've already logged what the problem is...
+ return;
+ }
+ log.debug("recording event", eventDetails);
+
+ if (extra && lazy.Resource.serverTime && !extra.serverTime) {
+ extra.serverTime = String(lazy.Resource.serverTime);
+ }
+ let category = "sync";
+ let ts = Math.floor(tryGetMonotonicTimestamp());
+
+ // An event record is a simple array with at least 4 items.
+ let event = [ts, category, method, object];
+ // It may have up to 6 elements if |extra| is defined
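+ // e.g. (illustrative) [12345, "sync", "displayURI", "sendcommand", null,
+ // { serverTime: "1234567890" }].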
+ if (value) {
+ event.push(value);
+ if (extra) {
+ event.push(extra);
+ }
+ } else if (extra) {
+ event.push(null); // a null for the empty value.
+ event.push(extra);
+ }
+ this.events.push(event);
+ this.maybeSubmitForInterval();
+ }
+
+ observe(subject, topic, data) {
+ log.trace(`observed ${topic} ${data}`);
+
+ switch (topic) {
+ case "weave:service:ready":
+ case "weave:service:login:got-hashed-id":
+ case "fxaccounts:new_device_id":
+ this.onAccountInitOrChange();
+ break;
+
+ case "fxaccounts:onlogout":
+ this.onAccountLogout();
+ break;
+
+ /* sync itself state changes */
+ case "weave:service:sync:start":
+ this.onSyncStarted(data);
+ break;
+
+ case "weave:service:sync:finish":
+ if (this._checkCurrent(topic)) {
+ this.onSyncFinished(null);
+ }
+ break;
+
+ case "weave:service:sync:error":
+ // argument needs to be truthy (this should always be the case)
+ this.onSyncFinished(subject || "Unknown");
+ break;
+
+ /* engine sync state changes */
+ case "weave:engine:sync:start":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineStart(data);
+ }
+ break;
+ case "weave:engine:sync:finish":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineStop(data, null);
+ }
+ break;
+
+ case "weave:engine:sync:error":
+ if (this._checkCurrent(topic)) {
+ // argument needs to be truthy (this should always be the case)
+ this.current.onEngineStop(data, subject || "Unknown");
+ }
+ break;
+
+ /* engine counts */
+ case "weave:engine:sync:applied":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineApplied(data, subject);
+ }
+ break;
+
+ case "weave:engine:sync:step":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineStep(data, subject);
+ }
+ break;
+
+ case "weave:engine:sync:uploaded":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineUploaded(data, subject);
+ }
+ break;
+
+ case "weave:engine:validate:finish":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineValidated(data, subject);
+ }
+ break;
+
+ case "weave:engine:validate:error":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineValidateError(data, subject || "Unknown");
+ }
+ break;
+
+ case "weave:telemetry:event":
+ case "fxa:telemetry:event":
+ this._recordEvent(subject);
+ break;
+
+ case "weave:telemetry:histogram":
+ this._addHistogram(data);
+ break;
+
+ case "weave:telemetry:migration":
+ this._addMigrationRecord(data, subject);
+ break;
+
+ default:
+ log.warn(`unexpected observer topic ${topic}`);
+ break;
+ }
+ }
+
+ // Transform an exception into a standard description. Exposed here for when
+ // this module isn't directly responsible for knowing the transform should
+ // happen (for example, when including an error in the |extra| field of
+ // event telemetry).
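+ // For example, an error object carrying { status: 503 } maps to
+ // { name: "httperror", code: 503 }, and a bare string starting with "error."
+ // maps to { name: "othererror", error }.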
+ transformError(error) {
+ // Certain parts of sync will use this pattern as a way to communicate to
+ // processIncoming to abort the processing. However, there's no guarantee
+ // this can only happen then.
+ if (typeof error == "object" && error.code && error.cause) {
+ error = error.cause;
+ }
+ if (lazy.Async.isShutdownException(error)) {
+ return { name: "shutdownerror" };
+ }
+
+ if (typeof error === "string") {
+ if (error.startsWith("error.")) {
+ // This is hacky, but I can't imagine that it's not also accurate.
+ return { name: "othererror", error };
+ }
+ error = ErrorSanitizer.cleanErrorMessage(error);
+ return { name: "unexpectederror", error };
+ }
+
+ if (error instanceof lazy.AuthenticationError) {
+ return { name: "autherror", from: error.source };
+ }
+
+ if (DOMException.isInstance(error)) {
+ return {
+ name: "unexpectederror",
+ error: ErrorSanitizer.cleanErrorMessage(error.message, error),
+ };
+ }
+
+ let httpCode =
+ error.status || (error.response && error.response.status) || error.code;
+
+ if (httpCode) {
+ return { name: "httperror", code: httpCode };
+ }
+
+ if (error.failureCode) {
+ return { name: "othererror", error: error.failureCode };
+ }
+
+ if (error.result) {
+ // Many "nsresult" errors are actually network errors - if they are
+ // associated with the "network" module we assume that's true.
+ // We also assume NS_ERROR_ABORT is such an error - for almost everything
+ // we care about, it actually is (eg, if the connection fails early enough
+ // or if we have a captive portal etc). We don't lose anything by this
+ // assumption; the error simply lands in the "httperror" category rather
+ // than "nserror", so our analysis can still find it.
+ if (
+ error.result == Cr.NS_ERROR_ABORT ||
+ NS_ERROR_GET_MODULE(error.result) == NS_ERROR_MODULE_NETWORK
+ ) {
+ return { name: "httperror", code: error.result };
+ }
+ return { name: "nserror", code: error.result };
+ }
+ // It's probably an Error object, but it also could be some
+ // other object that may or may not override toString to do
+ // something useful.
+ let msg = String(error);
+ if (msg.startsWith("[object")) {
+ // Nothing useful in the default, check for a string "message" property.
+ if (typeof error.message == "string") {
+ msg = String(error.message);
+ } else {
+ // Hopefully it won't come to this...
+ msg = JSON.stringify(error);
+ }
+ }
+ return {
+ name: "unexpectederror",
+ error: ErrorSanitizer.cleanErrorMessage(msg),
+ };
+ }
+}
+
+export var SyncTelemetry = new SyncTelemetryImpl(ENGINES);
diff --git a/services/sync/modules/util.sys.mjs b/services/sync/modules/util.sys.mjs
new file mode 100644
index 0000000000..e68ae0e19f
--- /dev/null
+++ b/services/sync/modules/util.sys.mjs
@@ -0,0 +1,780 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { Observers } from "resource://services-common/observers.sys.mjs";
+
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+import { CryptoUtils } from "resource://services-crypto/utils.sys.mjs";
+
+import {
+ DEVICE_TYPE_DESKTOP,
+ MAXIMUM_BACKOFF_INTERVAL,
+ PREFS_BRANCH,
+ SYNC_KEY_DECODED_LENGTH,
+ SYNC_KEY_ENCODED_LENGTH,
+ WEAVE_VERSION,
+} from "resource://services-sync/constants.sys.mjs";
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+const lazy = {};
+import * as FxAccountsCommon from "resource://gre/modules/FxAccountsCommon.sys.mjs";
+
+XPCOMUtils.defineLazyServiceGetter(
+ lazy,
+ "cryptoSDR",
+ "@mozilla.org/login-manager/crypto/SDR;1",
+ "nsILoginManagerCrypto"
+);
+
+XPCOMUtils.defineLazyPreferenceGetter(
+ lazy,
+ "localDeviceType",
+ "services.sync.client.type",
+ DEVICE_TYPE_DESKTOP
+);
+
+/*
+ * Custom exception types.
+ */
+class LockException extends Error {
+ constructor(message) {
+ super(message);
+ this.name = "LockException";
+ }
+}
+
+class HMACMismatch extends Error {
+ constructor(message) {
+ super(message);
+ this.name = "HMACMismatch";
+ }
+}
+
+/*
+ * Utility functions
+ */
+export var Utils = {
+ // Aliases from CryptoUtils.
+ generateRandomBytesLegacy: CryptoUtils.generateRandomBytesLegacy,
+ computeHTTPMACSHA1: CryptoUtils.computeHTTPMACSHA1,
+ digestUTF8: CryptoUtils.digestUTF8,
+ digestBytes: CryptoUtils.digestBytes,
+ sha256: CryptoUtils.sha256,
+ hkdfExpand: CryptoUtils.hkdfExpand,
+ pbkdf2Generate: CryptoUtils.pbkdf2Generate,
+ getHTTPMACSHA1Header: CryptoUtils.getHTTPMACSHA1Header,
+
+ /**
+ * The string to use as the base User-Agent in Sync requests.
+ * This string will look something like
+ *
+ * Firefox/49.0a1 (Windows NT 6.1; WOW64; rv:46.0) FxSync/1.51.0.20160516142357.desktop
+ */
+ _userAgent: null,
+ get userAgent() {
+ if (!this._userAgent) {
+ let hph = Cc["@mozilla.org/network/protocol;1?name=http"].getService(
+ Ci.nsIHttpProtocolHandler
+ );
+ /* eslint-disable no-multi-spaces */
+ this._userAgent =
+ Services.appinfo.name +
+ "/" +
+ Services.appinfo.version + // Product.
+ " (" +
+ hph.oscpu +
+ ")" + // (oscpu)
+ " FxSync/" +
+ WEAVE_VERSION +
+ "." + // Sync.
+ Services.appinfo.appBuildID +
+ "."; // Build.
+ /* eslint-enable no-multi-spaces */
+ }
+ return this._userAgent + lazy.localDeviceType;
+ },
+
+ /**
+ * Wrap a [promise-returning] function to catch all exceptions and log them.
+ *
+ * @usage MyObj._catch = Utils.catch;
+ * MyObj.foo = function() { this._catch(func)(); }
+ *
+ * Optionally pass a function which will be called if an
+ * exception occurs.
+ */
+ catch(func, exceptionCallback) {
+ let thisArg = this;
+ return async function WrappedCatch() {
+ try {
+ return await func.call(thisArg);
+ } catch (ex) {
+ thisArg._log.debug(
+ "Exception calling " + (func.name || "anonymous function"),
+ ex
+ );
+ if (exceptionCallback) {
+ return exceptionCallback.call(thisArg, ex);
+ }
+ return null;
+ }
+ };
+ },
+
+ throwLockException(label) {
+ throw new LockException(`Could not acquire lock. Label: "${label}".`);
+ },
+
+ /**
+ * Wrap a [promise-returning] function to call lock before calling the
+ * function, then unlock when it finishes executing or throws.
+ *
+ * @usage MyObj._lock = Utils.lock;
+ * MyObj.foo = async function() { await this._lock(func)(); }
+ */
+ lock(label, func) {
+ let thisArg = this;
+ return async function WrappedLock() {
+ if (!thisArg.lock()) {
+ Utils.throwLockException(label);
+ }
+
+ try {
+ return await func.call(thisArg);
+ } finally {
+ thisArg.unlock();
+ }
+ };
+ },
+
+ isLockException: function isLockException(ex) {
+ return ex instanceof LockException;
+ },
+
+ /**
+ * Wrap a [promise-returning] function to notify when it starts and
+ * finishes executing, or when it throws an error.
+ *
+ * The message is a combination of a provided prefix, the local name, and
+ * the event. Possible events are: "start", "finish", "error". The subject
+ * is the function's return value on "finish" or the caught exception on
+ * "error". The data argument is the predefined data value.
+ *
+ * Example:
+ *
+ * @usage function MyObj(name) {
+ * this.name = name;
+ * this._notify = Utils.notify("obj:");
+ * }
+ * MyObj.prototype = {
+ *          foo() {
+ *            return this._notify("func", "data-arg", async function () {
+ *              //...
+ *            })();
+ *          },
+ *        };
+ */
+ notify(prefix) {
+ return function NotifyMaker(name, data, func) {
+ let thisArg = this;
+ let notify = function (state, subject) {
+ let mesg = prefix + name + ":" + state;
+ thisArg._log.trace("Event: " + mesg);
+ Observers.notify(mesg, subject, data);
+ };
+
+ return async function WrappedNotify() {
+ notify("start", null);
+ try {
+ let ret = await func.call(thisArg);
+ notify("finish", ret);
+ return ret;
+ } catch (ex) {
+ notify("error", ex);
+ throw ex;
+ }
+ };
+ };
+ },
+
+ /**
+ * GUIDs are 9 random bytes encoded with base64url (RFC 4648).
+ * That makes them 12 characters long with 72 bits of entropy.
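+ * For example, a (hypothetical) GUID looks like "hWENQzhbbfJJ".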
+ */
+ makeGUID: function makeGUID() {
+ return CommonUtils.encodeBase64URL(Utils.generateRandomBytesLegacy(9));
+ },
+
+ _base64url_regex: /^[-abcdefghijklmnopqrstuvwxyz0123456789_]{12}$/i,
+ checkGUID: function checkGUID(guid) {
+ return !!guid && this._base64url_regex.test(guid);
+ },
+
+ /**
+ * Add a simple getter/setter to an object that defers access of a property
+ * to an inner property.
+ *
+ * @param obj
+ * Object to add properties to defer in its prototype
+ * @param defer
+ * Property of obj to defer to
+ * @param prop
+ * Property name to defer (or an array of property names)
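+ *
+ * Example (illustrative names):
+ *   Utils.deferGetSet(MyRecord, "cleartext", ["title", "parentid"]);
+ *   // instances now read/write this.title via this.cleartext.title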
+ */
+ deferGetSet: function Utils_deferGetSet(obj, defer, prop) {
+ if (Array.isArray(prop)) {
+ return prop.map(prop => Utils.deferGetSet(obj, defer, prop));
+ }
+
+ let prot = obj.prototype;
+
+ // Create a getter if it doesn't exist yet
+ if (!prot.__lookupGetter__(prop)) {
+ prot.__defineGetter__(prop, function () {
+ return this[defer][prop];
+ });
+ }
+
+ // Create a setter if it doesn't exist yet
+ if (!prot.__lookupSetter__(prop)) {
+ prot.__defineSetter__(prop, function (val) {
+ this[defer][prop] = val;
+ });
+ }
+ },
+
+ deepEquals: function eq(a, b) {
+ // If they're triple equals, then it must be equals!
+ if (a === b) {
+ return true;
+ }
+
+ // If they weren't equal, they must be objects to be different
+ if (typeof a != "object" || typeof b != "object") {
+ return false;
+ }
+
+ // But null objects won't have properties to compare
+ if (a === null || b === null) {
+ return false;
+ }
+
+ // Make sure all of a's keys have a matching value in b
+ for (let k in a) {
+ if (!eq(a[k], b[k])) {
+ return false;
+ }
+ }
+
+ // Do the same for b's keys but skip those that we already checked
+ for (let k in b) {
+ if (!(k in a) && !eq(a[k], b[k])) {
+ return false;
+ }
+ }
+
+ return true;
+ },
+
+ // Generator and discriminator for HMAC exceptions.
+ // Split these out in case we want to make them richer in future, and to
+ // avoid inevitable confusion if the message changes.
+ throwHMACMismatch: function throwHMACMismatch(shouldBe, is) {
+ throw new HMACMismatch(
+ `Record SHA256 HMAC mismatch: should be ${shouldBe}, is ${is}`
+ );
+ },
+
+ isHMACMismatch: function isHMACMismatch(ex) {
+ return ex instanceof HMACMismatch;
+ },
+
+ /**
+ * Turn RFC 4648 base32 into our own user-friendly version.
+ * ABCDEFGHIJKLMNOPQRSTUVWXYZ234567
+ * becomes
+ * abcdefghijk8mn9pqrstuvwxyz234567
+ */
+ base32ToFriendly: function base32ToFriendly(input) {
+ return input.toLowerCase().replace(/l/g, "8").replace(/o/g, "9");
+ },
+
+ base32FromFriendly: function base32FromFriendly(input) {
+ return input.toUpperCase().replace(/8/g, "L").replace(/9/g, "O");
+ },
+
+ /**
+ * Key manipulation.
+ */
+
+ // Encode an octet string in friendly base32 *with no trailing =*.
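+ // For example, assuming the usual 16-byte Sync key, the base32 form is 26
+ // characters plus padding, and the slice keeps just those 26 characters.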
+ encodeKeyBase32: function encodeKeyBase32(keyData) {
+ return Utils.base32ToFriendly(CommonUtils.encodeBase32(keyData)).slice(
+ 0,
+ SYNC_KEY_ENCODED_LENGTH
+ );
+ },
+
+ decodeKeyBase32: function decodeKeyBase32(encoded) {
+ return CommonUtils.decodeBase32(
+ Utils.base32FromFriendly(Utils.normalizePassphrase(encoded))
+ ).slice(0, SYNC_KEY_DECODED_LENGTH);
+ },
+
+ jsonFilePath(...args) {
+ let [fileName] = args.splice(-1);
+
+ return PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ ...args,
+ `${fileName}.json`
+ );
+ },
+
+ /**
+ * Load a JSON file from disk in the profile directory.
+ *
+ * @param filePath
+ *        JSON file path to load, relative to profileDir/weave. The ".json"
+ *        extension is appended to the file name.
+ * @param that
+ * Object to use for logging.
+ *
+ * @return Promise<Object|null>
+ *         Promise resolved with the parsed object, or with null if the
+ *         file is missing or cannot be read.
+ */
+ async jsonLoad(filePath, that) {
+ let path;
+ if (Array.isArray(filePath)) {
+ path = Utils.jsonFilePath(...filePath);
+ } else {
+ path = Utils.jsonFilePath(filePath);
+ }
+
+ if (that._log && that._log.trace) {
+ that._log.trace("Loading json from disk: " + path);
+ }
+
+ try {
+ return await IOUtils.readJSON(path);
+ } catch (e) {
+ if (!DOMException.isInstance(e) || e.name !== "NotFoundError") {
+ if (that._log) {
+ that._log.debug("Failed to load json", e);
+ }
+ }
+ return null;
+ }
+ },
+
+ /**
+ * Save a json-able object to disk in the profile directory.
+ *
+ * @param filePath
+ *        JSON file path to save to, relative to profileDir/weave;
+ *        saved as <filePath>.json.
+ * @param that
+ * Object to use for logging.
+ * @param obj
+ *        Function providing the json-able object to save. If this isn't
+ *        a function, it is used directly as the object to serialize.
+ *
+ * @return Promise<>
+ * Promise resolved when the write has been performed.
+ */
+ async jsonSave(filePath, that, obj) {
+ let path = PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ ...(filePath + ".json").split("/")
+ );
+ let dir = PathUtils.parent(path);
+
+ await IOUtils.makeDirectory(dir, { createAncestors: true });
+
+ if (that._log) {
+ that._log.trace("Saving json to disk: " + path);
+ }
+
+ let json = typeof obj == "function" ? obj.call(that) : obj;
+
+ return IOUtils.writeJSON(path, json);
+ },
+
+ /**
+ * Helper utility function to fit an array of records so that, when
+ * serialized, they will be within payloadSizeMaxBytes. Returns a new array
+ * containing only as many records as fit, dropping items from the end.
+ *
+ * Note: This shouldn't be used for extremely large record sizes as
+ * it uses JSON.stringify, which could lead to a heavy performance hit.
+ * See Bug 1815151 for more details.
+ *
+ */
+ tryFitItems(records, payloadSizeMaxBytes) {
+ // Copy this so that callers don't have to do it in advance.
+ records = records.slice();
+ let encoder = Utils.utf8Encoder;
+ const computeSerializedSize = () =>
+ encoder.encode(JSON.stringify(records)).byteLength;
+ // Figure out how many records we can pack into a payload.
+ // We use byteLength here because the data has not yet been encrypted and
+ // encoded into its ascii form.
+ let size = computeSerializedSize();
+ // See bug 535326 comment 8 for an explanation of the estimation
+ const maxSerializedSize = (payloadSizeMaxBytes / 4) * 3 - 1500;
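+ // For example (illustrative numbers): with payloadSizeMaxBytes = 262144,
+ // maxSerializedSize is (262144 / 4) * 3 - 1500 = 195108 bytes - leaving
+ // room for the 4/3 base64 expansion plus some overhead.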
+ if (maxSerializedSize < 0) {
+ // This is probably due to a test, but it causes very bad behavior if a
+ // test causes this accidentally. We could throw, but there's an obvious/
+ // natural way to handle it, so we do that instead (otherwise we'd have a
+ // weird lower bound of ~1125b on the max record payload size).
+ return [];
+ }
+ if (size > maxSerializedSize) {
+ // Estimate a little more than the direct fraction to maximize packing
+ let cutoff = Math.ceil((records.length * maxSerializedSize) / size);
+ records = records.slice(0, cutoff + 1);
+
+ // Keep dropping off the last entry until the data fits.
+ while (computeSerializedSize() > maxSerializedSize) {
+ records.pop();
+ }
+ }
+ return records;
+ },
+
+ /**
+ * Move a json file in the profile directory. Will fail if a file exists at the
+ * destination.
+ *
+ * @returns a promise that resolves to undefined on success, or rejects on failure
+ *
+ * @param aFrom
+ *        Current path to the JSON file saved on disk, relative to
+ *        profileDir/weave. ".json" will be appended to the file name.
+ * @param aTo
+ *        New path to the JSON file saved on disk, relative to
+ *        profileDir/weave. ".json" will be appended to the file name.
+ * @param that
+ * Object to use for logging
+ */
+ jsonMove(aFrom, aTo, that) {
+ let pathFrom = PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ ...(aFrom + ".json").split("/")
+ );
+ let pathTo = PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ ...(aTo + ".json").split("/")
+ );
+ if (that._log) {
+ that._log.trace("Moving " + pathFrom + " to " + pathTo);
+ }
+ return IOUtils.move(pathFrom, pathTo, { noOverwrite: true });
+ },
+
+ /**
+ * Removes a json file in the profile directory.
+ *
+ * @returns a promise that resolves to undefined on success, or rejects on failure
+ *
+ * @param filePath
+ *        Current path to the JSON file saved on disk, relative to
+ *        profileDir/weave. ".json" will be appended to the file name.
+ * @param that
+ * Object to use for logging
+ */
+ jsonRemove(filePath, that) {
+ let path = PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ ...(filePath + ".json").split("/")
+ );
+ if (that._log) {
+ that._log.trace("Deleting " + path);
+ }
+ return IOUtils.remove(path, { ignoreAbsent: true });
+ },
+
+ /**
+ * The following are the methods supported for UI use:
+ *
+ * * isPassphrase:
+ * determines whether a string is either a normalized or presentable
+ * passphrase.
+ * * normalizePassphrase:
+ * take a presentable passphrase and reduce it to a normalized
+ * representation for storage. normalizePassphrase can safely be called
+ * on normalized input.
+ */
+
+ isPassphrase(s) {
+ if (s) {
+ return /^[abcdefghijkmnpqrstuvwxyz23456789]{26}$/.test(
+ Utils.normalizePassphrase(s)
+ );
+ }
+ return false;
+ },
+
+ normalizePassphrase: function normalizePassphrase(pp) {
+ // Short var name... have you seen the lines below?!
+ // Allow leading and trailing whitespace.
+ pp = pp.trim().toLowerCase();
+
+ // 20-char sync key.
+ if (pp.length == 23 && [5, 11, 17].every(i => pp[i] == "-")) {
+ return (
+ pp.slice(0, 5) + pp.slice(6, 11) + pp.slice(12, 17) + pp.slice(18, 23)
+ );
+ }
+
+ // "Modern" 26-char key.
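+ // e.g. a (hypothetical) key "a-bcdef-ghijk-mnpqr-stuvw-xyz23" normalizes
+ // to "abcdefghijkmnpqrstuvwxyz23".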
+ if (pp.length == 31 && [1, 7, 13, 19, 25].every(i => pp[i] == "-")) {
+ return (
+ pp.slice(0, 1) +
+ pp.slice(2, 7) +
+ pp.slice(8, 13) +
+ pp.slice(14, 19) +
+ pp.slice(20, 25) +
+ pp.slice(26, 31)
+ );
+ }
+
+ // Something else -- just return.
+ return pp;
+ },
+
+ /**
+ * Create an array like the first but without elements of the second. Reuse
+ * arrays if possible.
+ */
+ arraySub: function arraySub(minuend, subtrahend) {
+ if (!minuend.length || !subtrahend.length) {
+ return minuend;
+ }
+ let setSubtrahend = new Set(subtrahend);
+ return minuend.filter(i => !setSubtrahend.has(i));
+ },
+
+ /**
+ * Build the union of two arrays. Reuse arrays if possible.
+ */
+ arrayUnion: function arrayUnion(foo, bar) {
+ if (!foo.length) {
+ return bar;
+ }
+ if (!bar.length) {
+ return foo;
+ }
+ return foo.concat(Utils.arraySub(bar, foo));
+ },
+
+ /**
+ * Add all the items in `items` to the provided Set in-place.
+ *
+ * @return The provided set.
+ */
+ setAddAll(set, items) {
+ for (let item of items) {
+ set.add(item);
+ }
+ return set;
+ },
+
+ /**
+ * Delete every item in `items` from the provided Set in-place.
+ *
+ * @return The provided set.
+ */
+ setDeleteAll(set, items) {
+ for (let item of items) {
+ set.delete(item);
+ }
+ return set;
+ },
+
+ /**
+ * Take the first `size` items from the Set `items`.
+ *
+ * @return A Set of size at most `size`
+ */
+ subsetOfSize(items, size) {
+ let result = new Set();
+ let count = 0;
+ for (let item of items) {
+ if (count++ == size) {
+ return result;
+ }
+ result.add(item);
+ }
+ return result;
+ },
+
+ bind2: function Async_bind2(object, method) {
+ return function innerBind() {
+ return method.apply(object, arguments);
+ };
+ },
+
+ /**
+ * Is there a master password configured and currently locked?
+ */
+ mpLocked() {
+ return !lazy.cryptoSDR.isLoggedIn;
+ },
+
+ // If Master Password is enabled and locked, present a dialog to unlock it.
+ // Return whether the system is unlocked.
+ ensureMPUnlocked() {
+ if (lazy.cryptoSDR.uiBusy) {
+ return false;
+ }
+ try {
+ lazy.cryptoSDR.encrypt("bacon");
+ return true;
+ } catch (e) {}
+ return false;
+ },
+
+ /**
+ * Return a value for a backoff interval. Maximum is eight hours, unless
+ * Status.backoffInterval is higher.
+ *
+ */
+ calculateBackoff: function calculateBackoff(
+ attempts,
+ baseInterval,
+ statusInterval
+ ) {
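+    // For example (illustrative numbers): attempts = 3 and baseInterval = 60
+    // give a raw backoff between 3 * 60 = 180 and 3 * 119 = 357, which is
+    // then capped at MAXIMUM_BACKOFF_INTERVAL and floored at statusInterval.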
+ let backoffInterval =
+ attempts * (Math.floor(Math.random() * baseInterval) + baseInterval);
+ return Math.max(
+ Math.min(backoffInterval, MAXIMUM_BACKOFF_INTERVAL),
+ statusInterval
+ );
+ },
+
+ /**
+ * Return a set of hostnames (including the protocol) which may have
+ * credentials for sync itself stored in the login manager.
+ *
+ * In general, these hosts will not have their passwords synced, will be
+ * reset when we drop sync credentials, etc.
+ */
+ getSyncCredentialsHosts() {
+ let result = new Set();
+ // the FxA host
+ result.add(FxAccountsCommon.FXA_PWDMGR_HOST);
+ // We used to include additional hosts here (hence the Set() result) but
+ // they no longer get special treatment (hence the Set() with exactly 1 item)
+ return result;
+ },
+
+ /**
+ * Helper to implement a more efficient version of fairly common pattern:
+ *
+ * Utils.defineLazyIDProperty(this, "syncID", "services.sync.client.syncID")
+ *
+ * is equivalent to (but more efficient than) the following:
+ *
+ * Foo.prototype = {
+ * ...
+ * get syncID() {
+ * let syncID = Svc.PrefBranch.getStringPref("client.syncID", "");
+ * return syncID == "" ? this.syncID = Utils.makeGUID() : syncID;
+ * },
+ * set syncID(value) {
+ * Svc.PrefBranch.setStringPref("client.syncID", value);
+ * },
+ * ...
+ * };
+ */
+ defineLazyIDProperty(object, propName, prefName) {
+ // An object that exists to be the target of the lazy pref getter.
+ // We can't use `object` (at least, not using `propName`) since XPCOMUtils
+ // will stomp on any setter we define.
+ const storage = {};
+ XPCOMUtils.defineLazyPreferenceGetter(storage, "value", prefName, "");
+ Object.defineProperty(object, propName, {
+ configurable: true,
+ enumerable: true,
+ get() {
+ let value = storage.value;
+ if (!value) {
+ value = Utils.makeGUID();
+ Services.prefs.setStringPref(prefName, value);
+ }
+ return value;
+ },
+ set(value) {
+ Services.prefs.setStringPref(prefName, value);
+ },
+ });
+ },
+
+ getDeviceType() {
+ return lazy.localDeviceType;
+ },
+
+ formatTimestamp(date) {
+ // Format timestamp as: "%Y-%m-%d %H:%M:%S"
+ let year = String(date.getFullYear());
+ let month = String(date.getMonth() + 1).padStart(2, "0");
+ let day = String(date.getDate()).padStart(2, "0");
+ let hours = String(date.getHours()).padStart(2, "0");
+ let minutes = String(date.getMinutes()).padStart(2, "0");
+ let seconds = String(date.getSeconds()).padStart(2, "0");
+
+ return `${year}-${month}-${day} ${hours}:${minutes}:${seconds}`;
+ },
+
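+ // Depth-first walk yielding [node, parent] pairs, skipping the root itself.
+ // For example, given a (hypothetical) tree { children: [a, b] }, this yields
+ // [a, tree], then a's own descendants, then [b, tree], and so on.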
+ *walkTree(tree, parent = null) {
+ if (tree) {
+ // Skip root node
+ if (parent) {
+ yield [tree, parent];
+ }
+ if (tree.children) {
+ for (let child of tree.children) {
+ yield* Utils.walkTree(child, tree);
+ }
+ }
+ }
+ },
+};
+
+/**
+ * A subclass of Set that serializes as an Array when passed to JSON.stringify.
+ */
+export class SerializableSet extends Set {
+ toJSON() {
+ return Array.from(this);
+ }
+}
+
+ChromeUtils.defineLazyGetter(Utils, "_utf8Converter", function () {
+ let converter = Cc[
+ "@mozilla.org/intl/scriptableunicodeconverter"
+ ].createInstance(Ci.nsIScriptableUnicodeConverter);
+ converter.charset = "UTF-8";
+ return converter;
+});
+
+ChromeUtils.defineLazyGetter(Utils, "utf8Encoder", () => new TextEncoder());
+
+/*
+ * Commonly-used services
+ */
+export var Svc = {};
+
+Svc.PrefBranch = Services.prefs.getBranch(PREFS_BRANCH);
+Svc.Obs = Observers;
+
+Svc.Obs.add("xpcom-shutdown", function () {
+ for (let name in Svc) {
+ delete Svc[name];
+ }
+});
diff --git a/services/sync/moz.build b/services/sync/moz.build
new file mode 100644
index 0000000000..a00472056e
--- /dev/null
+++ b/services/sync/moz.build
@@ -0,0 +1,72 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+with Files("**"):
+ BUG_COMPONENT = ("Firefox", "Sync")
+
+XPCSHELL_TESTS_MANIFESTS += ["tests/unit/xpcshell.toml"]
+
+EXTRA_COMPONENTS += [
+ "SyncComponents.manifest",
+]
+
+EXTRA_JS_MODULES["services-sync"] += [
+ "modules/addonsreconciler.sys.mjs",
+ "modules/addonutils.sys.mjs",
+ "modules/bridged_engine.sys.mjs",
+ "modules/collection_validator.sys.mjs",
+ "modules/constants.sys.mjs",
+ "modules/doctor.sys.mjs",
+ "modules/engines.sys.mjs",
+ "modules/keys.sys.mjs",
+ "modules/main.sys.mjs",
+ "modules/policies.sys.mjs",
+ "modules/record.sys.mjs",
+ "modules/resource.sys.mjs",
+ "modules/service.sys.mjs",
+ "modules/status.sys.mjs",
+ "modules/sync_auth.sys.mjs",
+ "modules/SyncDisconnect.sys.mjs",
+ "modules/SyncedTabs.sys.mjs",
+ "modules/telemetry.sys.mjs",
+ "modules/UIState.sys.mjs",
+ "modules/util.sys.mjs",
+ "Weave.sys.mjs",
+]
+
+EXTRA_JS_MODULES["services-sync"].engines += [
+ "modules/engines/addons.sys.mjs",
+ "modules/engines/clients.sys.mjs",
+ "modules/engines/extension-storage.sys.mjs",
+ "modules/engines/passwords.sys.mjs",
+ "modules/engines/prefs.sys.mjs",
+]
+
+if not CONFIG["MOZ_THUNDERBIRD"]:
+ EXTRA_JS_MODULES["services-sync"].engines += [
+ "modules/engines/bookmarks.sys.mjs",
+ "modules/engines/forms.sys.mjs",
+ "modules/engines/history.sys.mjs",
+ "modules/engines/tabs.sys.mjs",
+ ]
+
+EXTRA_JS_MODULES["services-sync"].stages += [
+ "modules/stages/declined.sys.mjs",
+ "modules/stages/enginesync.sys.mjs",
+]
+
+XPCOM_MANIFESTS += [
+ "components.conf",
+]
+
+TESTING_JS_MODULES.services.sync += [
+ "modules-testing/fakeservices.sys.mjs",
+ "modules-testing/fxa_utils.sys.mjs",
+ "modules-testing/rotaryengine.sys.mjs",
+ "modules-testing/utils.sys.mjs",
+]
+
+SPHINX_TREES["/services/sync"] = "docs"
diff --git a/services/sync/tests/tps/.eslintrc.js b/services/sync/tests/tps/.eslintrc.js
new file mode 100644
index 0000000000..182e87933b
--- /dev/null
+++ b/services/sync/tests/tps/.eslintrc.js
@@ -0,0 +1,28 @@
+"use strict";
+
+module.exports = {
+ globals: {
+ // Injected into tests via tps.jsm
+ Addons: false,
+ Addresses: false,
+ Bookmarks: false,
+ CreditCards: false,
+ EnableEngines: false,
+ EnsureTracking: false,
+ ExtStorage: false,
+ Formdata: false,
+ History: false,
+ Login: false,
+ Passwords: false,
+ Phase: false,
+ Prefs: false,
+ STATE_DISABLED: false,
+ STATE_ENABLED: false,
+ Sync: false,
+ SYNC_WIPE_CLIENT: false,
+ SYNC_WIPE_REMOTE: false,
+ Tabs: false,
+ Windows: false,
+ WipeServer: false,
+ },
+};
diff --git a/services/sync/tests/tps/addons/api/restartless-xpi@tests.mozilla.org.json b/services/sync/tests/tps/addons/api/restartless-xpi@tests.mozilla.org.json
new file mode 100644
index 0000000000..8593bad089
--- /dev/null
+++ b/services/sync/tests/tps/addons/api/restartless-xpi@tests.mozilla.org.json
@@ -0,0 +1,21 @@
+{
+ "next": null,
+ "results": [
+ {
+ "name": "Restartless Test XPI",
+ "type": "extension",
+ "guid": "restartless-xpi@tests.mozilla.org",
+ "current_version": {
+ "version": "1.0",
+ "files": [
+ {
+ "platform": "all",
+ "size": 485,
+ "url": "http://127.0.0.1:4567/addons/restartless.xpi"
+ }
+ ]
+ },
+ "last_updated": "2011-09-05T20:42:09Z"
+ }
+ ]
+}
diff --git a/services/sync/tests/tps/addons/api/test-webext@quality.mozilla.org.json b/services/sync/tests/tps/addons/api/test-webext@quality.mozilla.org.json
new file mode 100644
index 0000000000..298ecc2ead
--- /dev/null
+++ b/services/sync/tests/tps/addons/api/test-webext@quality.mozilla.org.json
@@ -0,0 +1,21 @@
+{
+ "next": null,
+ "results": [
+ {
+ "name": "Test Webext XPI",
+ "type": "extension",
+ "guid": "test-webext@quality.mozilla.org",
+ "current_version": {
+ "version": "1.0",
+ "files": [
+ {
+ "platform": "all",
+ "size": 3412,
+ "url": "http://127.0.0.1:4567/addons/webextension.xpi"
+ }
+ ]
+ },
+ "last_updated": "2018-04-17T18:24:42Z"
+ }
+ ]
+}
diff --git a/services/sync/tests/tps/addons/restartless.xpi b/services/sync/tests/tps/addons/restartless.xpi
new file mode 100644
index 0000000000..973bc00cb5
--- /dev/null
+++ b/services/sync/tests/tps/addons/restartless.xpi
Binary files differ
diff --git a/services/sync/tests/tps/addons/webextension.xpi b/services/sync/tests/tps/addons/webextension.xpi
new file mode 100644
index 0000000000..0ed64f79ac
--- /dev/null
+++ b/services/sync/tests/tps/addons/webextension.xpi
Binary files differ
diff --git a/services/sync/tests/tps/all_tests.json b/services/sync/tests/tps/all_tests.json
new file mode 100644
index 0000000000..ea92e04b8f
--- /dev/null
+++ b/services/sync/tests/tps/all_tests.json
@@ -0,0 +1,34 @@
+{
+ "tests": {
+ "test_bookmark_conflict.js": {},
+ "test_sync.js": {},
+ "test_prefs.js": {},
+ "test_tabs.js": {},
+ "test_passwords.js": {},
+ "test_history.js": {},
+ "test_formdata.js": {},
+ "test_bug530717.js": {},
+ "test_bug531489.js": {},
+ "test_bug538298.js": {},
+ "test_bug556509.js": {},
+ "test_bug562515.js": {},
+ "test_bug535326.js": {},
+ "test_bug501528.js": {},
+ "test_bug575423.js": {},
+ "test_bug546807.js": {},
+ "test_history_collision.js": {},
+ "test_privbrw_passwords.js": {},
+ "test_privbrw_tabs.js": {},
+ "test_bookmarks_in_same_named_folder.js": {},
+ "test_client_wipe.js": {},
+ "test_special_tabs.js": {},
+ "test_addon_restartless_xpi.js": { "disabled": "Bug 1498974" },
+ "test_addon_webext_xpi.js": { "disabled": "Bug 1498974" },
+ "test_addon_reconciling.js": { "disabled": "Bug 1498974" },
+ "test_addon_wipe.js": { "disabled": "Bug 1498974" },
+ "test_existing_bookmarks.js": {},
+ "test_addresses.js": {},
+ "test_creditcards.js": {},
+ "test_extstorage.js": {}
+ }
+}
diff --git a/services/sync/tests/tps/test_addon_reconciling.js b/services/sync/tests/tps/test_addon_reconciling.js
new file mode 100644
index 0000000000..9c928734c8
--- /dev/null
+++ b/services/sync/tests/tps/test_addon_reconciling.js
@@ -0,0 +1,45 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// This test verifies that record reconciling works as expected. It makes
+// similar changes to add-ons in separate profiles and does a sync to verify
+// the proper action is taken.
+EnableEngines(["addons"]);
+
+var phases = {
+ phase01: "profile1",
+ phase02: "profile2",
+ phase03: "profile1",
+ phase04: "profile2",
+ phase05: "profile1",
+ phase06: "profile2",
+};
+
+const id = "restartless-xpi@tests.mozilla.org";
+
+// Install the add-on in 2 profiles.
+Phase("phase01", [
+ [Addons.verifyNot, [id]],
+ [Addons.install, [id]],
+ [Addons.verify, [id], STATE_ENABLED],
+ [Sync],
+]);
+Phase("phase02", [
+ [Addons.verifyNot, [id]],
+ [Sync],
+ [Addons.verify, [id], STATE_ENABLED],
+]);
+
+// Now we disable in one and uninstall in the other.
+Phase("phase03", [
+ [Sync], // Get GUID updates, potentially.
+ [Addons.setEnabled, [id], STATE_DISABLED],
+ // We've changed the state, but don't want this profile to sync until phase5,
+ // so if we ran a validation now we'd be expecting to find errors.
+ [Addons.skipValidation],
+]);
+Phase("phase04", [[EnsureTracking], [Addons.uninstall, [id]], [Sync]]);
+
+// When we sync, the uninstall should take precedence because it was newer.
+Phase("phase05", [[Sync]]);
+Phase("phase06", [[Sync], [Addons.verifyNot, [id]]]);
diff --git a/services/sync/tests/tps/test_addon_restartless_xpi.js b/services/sync/tests/tps/test_addon_restartless_xpi.js
new file mode 100644
index 0000000000..2b14245a6e
--- /dev/null
+++ b/services/sync/tests/tps/test_addon_restartless_xpi.js
@@ -0,0 +1,64 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// This test verifies that installing a restartless extension syncs to
+// other profiles.
+EnableEngines(["addons"]);
+
+var phases = {
+ phase01: "profile1",
+ phase02: "profile2",
+ phase03: "profile1",
+ phase04: "profile2",
+ phase05: "profile1",
+ phase06: "profile2",
+ phase07: "profile1",
+ phase08: "profile2",
+};
+
+const id = "restartless-xpi@tests.mozilla.org";
+
+// Verify install is synced
+Phase("phase01", [
+ [Addons.verifyNot, [id]],
+ [Addons.install, [id]],
+ [Addons.verify, [id], STATE_ENABLED],
+ [Sync],
+]);
+Phase("phase02", [
+ [Addons.verifyNot, [id]],
+ [Sync],
+ [Addons.verify, [id], STATE_ENABLED],
+]);
+
+// Now disable and see that it is synced.
+Phase("phase03", [
+ [EnsureTracking],
+ [Addons.setEnabled, [id], STATE_DISABLED],
+ [Addons.verify, [id], STATE_DISABLED],
+ [Sync],
+]);
+Phase("phase04", [[Sync], [Addons.verify, [id], STATE_DISABLED]]);
+
+// Enable and see it is synced.
+Phase("phase05", [
+ [EnsureTracking],
+ [Addons.setEnabled, [id], STATE_ENABLED],
+ [Addons.verify, [id], STATE_ENABLED],
+ [Sync],
+]);
+Phase("phase06", [[Sync], [Addons.verify, [id], STATE_ENABLED]]);
+
+// Uninstall and see it is synced.
+Phase("phase07", [
+ [EnsureTracking],
+ [Addons.verify, [id], STATE_ENABLED],
+ [Addons.uninstall, [id]],
+ [Addons.verifyNot, [id]],
+ [Sync],
+]);
+Phase("phase08", [
+ [Addons.verify, [id], STATE_ENABLED],
+ [Sync],
+ [Addons.verifyNot, [id]],
+]);
diff --git a/services/sync/tests/tps/test_addon_webext_xpi.js b/services/sync/tests/tps/test_addon_webext_xpi.js
new file mode 100644
index 0000000000..27065ee6af
--- /dev/null
+++ b/services/sync/tests/tps/test_addon_webext_xpi.js
@@ -0,0 +1,65 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// This test verifies that installing a web extension syncs to other profiles.
+// It's more or less copied from test_addon_restartless_xpi with a different id.
+
+EnableEngines(["addons"]);
+
+var phases = {
+ phase01: "profile1",
+ phase02: "profile2",
+ phase03: "profile1",
+ phase04: "profile2",
+ phase05: "profile1",
+ phase06: "profile2",
+ phase07: "profile1",
+ phase08: "profile2",
+};
+
+const id = "test-webext@quality.mozilla.org";
+
+// Verify install is synced
+Phase("phase01", [
+ [Addons.verifyNot, [id]],
+ [Addons.install, [id]],
+ [Addons.verify, [id], STATE_ENABLED],
+ [Sync],
+]);
+Phase("phase02", [
+ [Addons.verifyNot, [id]],
+ [Sync],
+ [Addons.verify, [id], STATE_ENABLED],
+]);
+
+// Now disable and see that it is synced.
+Phase("phase03", [
+ [EnsureTracking],
+ [Addons.setEnabled, [id], STATE_DISABLED],
+ [Addons.verify, [id], STATE_DISABLED],
+ [Sync],
+]);
+Phase("phase04", [[Sync], [Addons.verify, [id], STATE_DISABLED]]);
+
+// Enable and see it is synced.
+Phase("phase05", [
+ [EnsureTracking],
+ [Addons.setEnabled, [id], STATE_ENABLED],
+ [Addons.verify, [id], STATE_ENABLED],
+ [Sync],
+]);
+Phase("phase06", [[Sync], [Addons.verify, [id], STATE_ENABLED]]);
+
+// Uninstall and see it is synced.
+Phase("phase07", [
+ [EnsureTracking],
+ [Addons.verify, [id], STATE_ENABLED],
+ [Addons.uninstall, [id]],
+ [Addons.verifyNot, [id]],
+ [Sync],
+]);
+Phase("phase08", [
+ [Addons.verify, [id], STATE_ENABLED],
+ [Sync],
+ [Addons.verifyNot, [id]],
+]);
diff --git a/services/sync/tests/tps/test_addon_wipe.js b/services/sync/tests/tps/test_addon_wipe.js
new file mode 100644
index 0000000000..038f01d014
--- /dev/null
+++ b/services/sync/tests/tps/test_addon_wipe.js
@@ -0,0 +1,31 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// This test ensures that a client wipe followed by an "initial" sync will
+// restore add-ons. This test should expose flaws in the reconciling logic,
+// specifically around AddonsReconciler. This test is in response to bug
+// 792990.
+
+EnableEngines(["addons"]);
+
+var phases = {
+ phase01: "profile1",
+ phase02: "profile1",
+ phase03: "profile1",
+};
+
+const id1 = "restartless-xpi@tests.mozilla.org";
+const id2 = "test-webext@quality.mozilla.org";
+
+Phase("phase01", [[Addons.install, [id1]], [Addons.install, [id2]], [Sync]]);
+Phase("phase02", [
+ [Addons.verify, [id1], STATE_ENABLED],
+ [Addons.verify, [id2], STATE_ENABLED],
+ [Sync, SYNC_WIPE_CLIENT],
+ [Sync],
+]);
+Phase("phase03", [
+ [Addons.verify, [id1], STATE_ENABLED],
+ [Addons.verify, [id2], STATE_ENABLED],
+ [Sync], // Sync to ensure that the addon validator can run without error
+]);
diff --git a/services/sync/tests/tps/test_addresses.js b/services/sync/tests/tps/test_addresses.js
new file mode 100644
index 0000000000..33af349dd5
--- /dev/null
+++ b/services/sync/tests/tps/test_addresses.js
@@ -0,0 +1,84 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/* global Services */
+Services.prefs.setBoolPref("services.sync.engine.addresses", true);
+
+EnableEngines(["addresses"]);
+
+var phases = {
+ phase1: "profile1",
+ phase2: "profile2",
+ phase3: "profile1",
+};
+
+const address1 = [
+ {
+ "given-name": "Timothy",
+ "additional-name": "John",
+ "family-name": "Berners-Lee",
+ organization: "World Wide Web Consortium",
+ "street-address": "32 Vassar Street\nMIT Room 32-G524",
+ "address-level2": "Cambridge",
+ "address-level1": "MA",
+ "postal-code": "02139",
+ country: "US",
+ tel: "+16172535702",
+ email: "timbl@w3.org",
+ changes: {
+ organization: "W3C",
+ },
+ "unknown-1": "an unknown field from another client",
+ },
+];
+
+const address1_after = [
+ {
+ "given-name": "Timothy",
+ "additional-name": "John",
+ "family-name": "Berners-Lee",
+ organization: "W3C",
+ "street-address": "32 Vassar Street\nMIT Room 32-G524",
+ "address-level2": "Cambridge",
+ "address-level1": "MA",
+ "postal-code": "02139",
+ country: "US",
+ tel: "+16172535702",
+ email: "timbl@w3.org",
+ "unknown-1": "an unknown field from another client",
+ },
+];
+
+const address2 = [
+ {
+ "given-name": "John",
+ "additional-name": "R.",
+ "family-name": "Smith",
+ organization: "Mozilla",
+ "street-address":
+ "Geb\u00E4ude 3, 4. Obergeschoss\nSchlesische Stra\u00DFe 27",
+ "address-level2": "Berlin",
+ "address-level1": "BE",
+ "postal-code": "10997",
+ country: "DE",
+ tel: "+4930983333000",
+ email: "timbl@w3.org",
+ },
+];
+
+Phase("phase1", [[Addresses.add, address1], [Sync]]);
+
+Phase("phase2", [
+ [Sync],
+ [Addresses.verify, address1],
+ [Addresses.modify, address1],
+ [Addresses.add, address2],
+ [Sync],
+]);
+
+Phase("phase3", [
+ [Sync],
+ [Addresses.verify, address1_after],
+ [Addresses.verify, address2],
+ [Sync],
+]);
diff --git a/services/sync/tests/tps/test_bookmark_conflict.js b/services/sync/tests/tps/test_bookmark_conflict.js
new file mode 100644
index 0000000000..2832bded5d
--- /dev/null
+++ b/services/sync/tests/tps/test_bookmark_conflict.js
@@ -0,0 +1,138 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["bookmarks"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = {
+ phase1: "profile1",
+ phase2: "profile2",
+ phase3: "profile1",
+ phase4: "profile2",
+};
+
+// the initial list of bookmarks to add to the browser
+var bookmarksInitial = {
+ menu: [
+ { folder: "foldera" },
+ { folder: "folderb" },
+ { folder: "folderc" },
+ { folder: "folderd" },
+ ],
+
+ "menu/foldera": [{ uri: "http://www.cnn.com", title: "CNN" }],
+ "menu/folderb": [{ uri: "http://www.apple.com", title: "Apple", tags: [] }],
+ "menu/folderc": [{ uri: "http://www.yahoo.com", title: "Yahoo" }],
+
+ "menu/folderd": [],
+};
+
+// a list of bookmarks to delete during a 'delete' action on P2
+var bookmarksToDelete = {
+ menu: [{ folder: "foldera" }, { folder: "folderb" }],
+ "menu/folderc": [{ uri: "http://www.yahoo.com", title: "Yahoo" }],
+};
+
+// the modifications to make on P1, after P2 has synced, but before P1 has gotten
+// P2's changes
+var bookmarkMods = {
+ menu: [
+ { folder: "foldera" },
+ { folder: "folderb" },
+ { folder: "folderc" },
+ { folder: "folderd" },
+ ],
+
+ // we move this child out of its folder (p1), after deleting the folder (p2)
+ // and expect the child to come back to p2 after sync.
+ "menu/foldera": [
+ {
+ uri: "http://www.cnn.com",
+ title: "CNN",
+ changes: { location: "menu/folderd" },
+ },
+ ],
+
+ // we rename this child (p1) after deleting the folder (p2), and expect the
+ // child to be moved up into its grandparent (menu)
+ "menu/folderb": [
+ {
+ uri: "http://www.apple.com",
+ title: "Apple",
+ tags: [],
+ changes: { title: "Mac" },
+ },
+ ],
+
+ // we move this child (p1) after deleting the child (p2) and expect it to survive
+ "menu/folderc": [
+ {
+ uri: "http://www.yahoo.com",
+ title: "Yahoo",
+ changes: { location: "menu/folderd" },
+ },
+ ],
+
+ "menu/folderd": [],
+};
+
+// a list of bookmarks to delete during a 'delete' action
+bookmarksToDelete = {
+ menu: [{ folder: "foldera" }, { folder: "folderb" }],
+ "menu/folderc": [{ uri: "http://www.yahoo.com", title: "Yahoo" }],
+};
+
+// expected bookmark state after conflict resolution
+var bookmarksExpected = {
+ menu: [
+ { folder: "folderc" },
+ { folder: "folderd" },
+ { uri: "http://www.apple.com", title: "Mac" },
+ ],
+
+ "menu/folderc": [],
+
+ "menu/folderd": [
+ { uri: "http://www.cnn.com", title: "CNN" },
+ { uri: "http://www.yahoo.com", title: "Yahoo" },
+ ],
+};
+
+// Add bookmarks to profile1 and sync.
+Phase("phase1", [
+ [Bookmarks.add, bookmarksInitial],
+ [Bookmarks.verify, bookmarksInitial],
+ [Sync],
+ [Bookmarks.verify, bookmarksInitial],
+]);
+
+// Sync to profile2 and verify that the bookmarks are present. Delete
+// bookmarks/folders, verify that it's not present, and sync
+Phase("phase2", [
+ [Sync],
+ [Bookmarks.verify, bookmarksInitial],
+ [Bookmarks.delete, bookmarksToDelete],
+ [Bookmarks.verifyNot, bookmarksToDelete],
+ [Sync],
+]);
+
+// Using profile1, modify the bookmarks, and sync *after* the modification,
+// and then sync again to propagate the reconciliation changes.
+Phase("phase3", [
+ [Bookmarks.verify, bookmarksInitial],
+ [Bookmarks.modify, bookmarkMods],
+ [Sync],
+ [Bookmarks.verify, bookmarksExpected],
+ [Bookmarks.verifyNot, bookmarksToDelete],
+]);
+
+// Back in profile2, do a sync and verify that we're in the expected state
+Phase("phase4", [
+ [Sync],
+ [Bookmarks.verify, bookmarksExpected],
+ [Bookmarks.verifyNot, bookmarksToDelete],
+]);
diff --git a/services/sync/tests/tps/test_bookmarks_in_same_named_folder.js b/services/sync/tests/tps/test_bookmarks_in_same_named_folder.js
new file mode 100644
index 0000000000..69dd8ba8a6
--- /dev/null
+++ b/services/sync/tests/tps/test_bookmarks_in_same_named_folder.js
@@ -0,0 +1,53 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// bug 558077
+
+EnableEngines(["bookmarks"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = { phase1: "profile1", phase2: "profile2", phase3: "profile1" };
+
+var bookmarks_initial_1 = {
+ menu: [
+ { folder: "aaa", description: "foo" },
+ { uri: "http://www.mozilla.com" },
+ ],
+ "menu/aaa": [
+ { uri: "http://www.yahoo.com", title: "testing Yahoo" },
+ { uri: "http://www.google.com", title: "testing Google" },
+ ],
+};
+
+var bookmarks_initial_2 = {
+ menu: [
+ { folder: "aaa", description: "bar" },
+ { uri: "http://www.mozilla.com" },
+ ],
+ "menu/aaa": [
+ {
+ uri: "http://bugzilla.mozilla.org/show_bug.cgi?id=%s",
+ title: "Bugzilla",
+ },
+ { uri: "http://www.apple.com", tags: ["apple"] },
+ ],
+};
+
+Phase("phase1", [[Bookmarks.add, bookmarks_initial_1], [Sync]]);
+
+Phase("phase2", [
+ [Sync],
+ [Bookmarks.verify, bookmarks_initial_1],
+ [Bookmarks.add, bookmarks_initial_2],
+ [Sync],
+]);
+
+Phase("phase3", [
+ [Sync],
+ // XXX [Bookmarks.verify, bookmarks_initial_1],
+ [Bookmarks.verify, bookmarks_initial_2],
+]);
diff --git a/services/sync/tests/tps/test_bug501528.js b/services/sync/tests/tps/test_bug501528.js
new file mode 100644
index 0000000000..f86ba02403
--- /dev/null
+++ b/services/sync/tests/tps/test_bug501528.js
@@ -0,0 +1,75 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["passwords"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = {
+ phase1: "profile1",
+ phase2: "profile2",
+ phase3: "profile1",
+ phase4: "profile2",
+};
+
+/*
+ * Password lists
+ */
+
+var passwords_initial = [
+ {
+ hostname: "http://www.example.com",
+ submitURL: "http://login.example.com",
+ username: "joe",
+ password: "secret",
+ usernameField: "uname",
+ passwordField: "pword",
+ changes: {
+ password: "SeCrEt$$$",
+ },
+ },
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "jack",
+ password: "secretlogin",
+ },
+];
+
+var passwords_after_first_update = [
+ {
+ hostname: "http://www.example.com",
+ submitURL: "http://login.example.com",
+ username: "joe",
+ password: "SeCrEt$$$",
+ usernameField: "uname",
+ passwordField: "pword",
+ },
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "jack",
+ password: "secretlogin",
+ },
+];
+
+/*
+ * Test phases
+ */
+
+Phase("phase1", [[Passwords.add, passwords_initial], [Sync]]);
+
+Phase("phase2", [[Passwords.add, passwords_initial], [Sync]]);
+
+Phase("phase3", [
+ [Sync],
+ [Passwords.verify, passwords_initial],
+ [Passwords.modify, passwords_initial],
+ [Passwords.verify, passwords_after_first_update],
+ [Sync],
+]);
+
+Phase("phase4", [[Sync], [Passwords.verify, passwords_after_first_update]]);
diff --git a/services/sync/tests/tps/test_bug530717.js b/services/sync/tests/tps/test_bug530717.js
new file mode 100644
index 0000000000..37e6711cb5
--- /dev/null
+++ b/services/sync/tests/tps/test_bug530717.js
@@ -0,0 +1,47 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["prefs"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = { phase1: "profile1", phase2: "profile2", phase3: "profile1" };
+
+/*
+ * Preference lists
+ */
+
+var prefs1 = [
+ { name: "browser.startup.homepage", value: "http://www.getfirefox.com" },
+ { name: "browser.urlbar.maxRichResults", value: 20 },
+ { name: "privacy.clearOnShutdown.siteSettings", value: true },
+];
+
+var prefs2 = [
+ { name: "browser.startup.homepage", value: "http://www.mozilla.com" },
+ { name: "browser.urlbar.maxRichResults", value: 18 },
+ { name: "privacy.clearOnShutdown.siteSettings", value: false },
+];
+
+/*
+ * Test phases
+ */
+
+// Add prefs to profile1 and sync.
+Phase("phase1", [[Prefs.modify, prefs1], [Prefs.verify, prefs1], [Sync]]);
+
+// Sync profile2 and verify same prefs are present.
+Phase("phase2", [[Sync], [Prefs.verify, prefs1]]);
+
+// Using profile1, change some prefs, then do another sync with wipe-client.
+// Verify that the cloud's prefs are restored, and the recent local changes
+// discarded.
+Phase("phase3", [
+ [Prefs.modify, prefs2],
+ [Prefs.verify, prefs2],
+ [Sync, SYNC_WIPE_CLIENT],
+ [Prefs.verify, prefs1],
+]);
diff --git a/services/sync/tests/tps/test_bug531489.js b/services/sync/tests/tps/test_bug531489.js
new file mode 100644
index 0000000000..3cc79c87ec
--- /dev/null
+++ b/services/sync/tests/tps/test_bug531489.js
@@ -0,0 +1,43 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["bookmarks"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = { phase1: "profile1", phase2: "profile2", phase3: "profile1" };
+
+/*
+ * Bookmark asset lists: these define bookmarks that are used during the test
+ */
+
+// the initial list of bookmarks to add to the browser
+var bookmarks_initial = {
+ menu: [
+ { folder: "foldera" },
+ { uri: "http://www.google.com", title: "Google" },
+ ],
+ "menu/foldera": [{ uri: "http://www.google.com", title: "Google" }],
+ toolbar: [{ uri: "http://www.google.com", title: "Google" }],
+};
+
+/*
+ * Test phases
+ */
+
+// Add three bookmarks with the same url to different locations and sync.
+Phase("phase1", [
+ [Bookmarks.add, bookmarks_initial],
+ [Bookmarks.verify, bookmarks_initial],
+ [Sync],
+]);
+
+// Sync to profile2 and verify that all three bookmarks are present
+Phase("phase2", [[Sync], [Bookmarks.verify, bookmarks_initial]]);
+
+// Sync again to profile1 and verify that all three bookmarks are still
+// present.
+Phase("phase3", [[Sync], [Bookmarks.verify, bookmarks_initial]]);
diff --git a/services/sync/tests/tps/test_bug535326.js b/services/sync/tests/tps/test_bug535326.js
new file mode 100644
index 0000000000..ede8f45c10
--- /dev/null
+++ b/services/sync/tests/tps/test_bug535326.js
@@ -0,0 +1,148 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["tabs"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = { phase1: "profile1", phase2: "profile2" };
+
+var tabs1 = [
+ {
+ uri: "data:text/html,<html><head><title>Howdy</title></head><body>Howdy</body></html>",
+ title: "Howdy",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>America</title></head><body>America</body></html>",
+ title: "America",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Apple</title></head><body>Apple</body></html>",
+ title: "Apple",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>This</title></head><body>This</body></html>",
+ title: "This",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Bug</title></head><body>Bug</body></html>",
+ title: "Bug",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>IRC</title></head><body>IRC</body></html>",
+ title: "IRC",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Tinderbox</title></head><body>Tinderbox</body></html>",
+ title: "Tinderbox",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Fox</title></head><body>Fox</body></html>",
+ title: "Fox",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Hello</title></head><body>Hello</body></html>",
+ title: "Hello",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Eagle</title></head><body>Eagle</body></html>",
+ title: "Eagle",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Train</title></head><body>Train</body></html>",
+ title: "Train",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Macbook</title></head><body>Macbook</body></html>",
+ title: "Macbook",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Clock</title></head><body>Clock</body></html>",
+ title: "Clock",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Google</title></head><body>Google</body></html>",
+ title: "Google",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Human</title></head><body>Human</body></html>",
+ title: "Human",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Jetpack</title></head><body>Jetpack</body></html>",
+ title: "Jetpack",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Selenium</title></head><body>Selenium</body></html>",
+ title: "Selenium",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Mozilla</title></head><body>Mozilla</body></html>",
+ title: "Mozilla",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Firefox</title></head><body>Firefox</body></html>",
+ title: "Firefox",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Weave</title></head><body>Weave</body></html>",
+ title: "Weave",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Android</title></head><body>Android</body></html>",
+ title: "Android",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Bye</title></head><body>Bye</body></html>",
+ title: "Bye",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Hi</title></head><body>Hi</body></html>",
+ title: "Hi",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Final</title></head><body>Final</body></html>",
+ title: "Final",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Fennec</title></head><body>Fennec</body></html>",
+ title: "Fennec",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Mobile</title></head><body>Mobile</body></html>",
+ title: "Mobile",
+ profile: "profile1",
+ },
+];
+
+Phase("phase1", [[Tabs.add, tabs1], [Sync]]);
+
+Phase("phase2", [[Sync], [Tabs.verify, tabs1]]);
diff --git a/services/sync/tests/tps/test_bug538298.js b/services/sync/tests/tps/test_bug538298.js
new file mode 100644
index 0000000000..3f1abf2a96
--- /dev/null
+++ b/services/sync/tests/tps/test_bug538298.js
@@ -0,0 +1,78 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["bookmarks"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = {
+ phase1: "profile1",
+ phase2: "profile2",
+ phase3: "profile1",
+ phase4: "profile2",
+};
+
+/*
+ * Bookmark asset lists: these define bookmarks that are used during the test
+ */
+
+// the initial list of bookmarks to add to the browser
+var bookmarks_initial = {
+ toolbar: [
+ { uri: "http://www.google.com", title: "Google" },
+ {
+ uri: "http://www.cnn.com",
+ title: "CNN",
+ changes: {
+ position: "Google",
+ },
+ },
+ { uri: "http://www.mozilla.com", title: "Mozilla" },
+ {
+ uri: "http://www.firefox.com",
+ title: "Firefox",
+ changes: {
+ position: "Mozilla",
+ },
+ },
+ ],
+};
+
+var bookmarks_after_move = {
+ toolbar: [
+ { uri: "http://www.cnn.com", title: "CNN" },
+ { uri: "http://www.google.com", title: "Google" },
+ { uri: "http://www.firefox.com", title: "Firefox" },
+ { uri: "http://www.mozilla.com", title: "Mozilla" },
+ ],
+};
+
+/*
+ * Test phases
+ */
+
+// Add four bookmarks to the toolbar and sync.
+Phase("phase1", [
+ [Bookmarks.add, bookmarks_initial],
+ [Bookmarks.verify, bookmarks_initial],
+ [Sync],
+]);
+
+// Sync to profile2 and verify that all four bookmarks are present.
+Phase("phase2", [[Sync], [Bookmarks.verify, bookmarks_initial]]);
+
+// Change the order of the toolbar bookmarks, and sync.
+Phase("phase3", [
+ [Sync],
+ [Bookmarks.verify, bookmarks_initial],
+ [Bookmarks.modify, bookmarks_initial],
+ [Bookmarks.verify, bookmarks_after_move],
+ [Sync],
+]);
+
+// Go back to profile2, sync, and verify that the bookmarks are reordered
+// as expected.
+Phase("phase4", [[Sync], [Bookmarks.verify, bookmarks_after_move]]);
diff --git a/services/sync/tests/tps/test_bug546807.js b/services/sync/tests/tps/test_bug546807.js
new file mode 100644
index 0000000000..f02f632b41
--- /dev/null
+++ b/services/sync/tests/tps/test_bug546807.js
@@ -0,0 +1,38 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["tabs"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = { phase1: "profile1", phase2: "profile2" };
+
+/*
+ * Tabs data
+ */
+
+var tabs1 = [
+ { uri: "about:config", profile: "profile1" },
+ { uri: "about:credits", profile: "profile1" },
+ {
+ uri: "data:text/html,<html><head><title>Apple</title></head><body>Apple</body></html>",
+ title: "Apple",
+ profile: "profile1",
+ },
+];
+
+var tabs_absent = [
+ { uri: "about:config", profile: "profile1" },
+ { uri: "about:credits", profile: "profile1" },
+];
+
+/*
+ * Test phases
+ */
+
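+// Open the tabs in profile1 and sync; profile2 then syncs and verifies that
+// the about: tabs were not synced over.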
+Phase("phase1", [[Tabs.add, tabs1], [Sync]]);
+
+Phase("phase2", [[Sync], [Tabs.verifyNot, tabs_absent]]);
diff --git a/services/sync/tests/tps/test_bug556509.js b/services/sync/tests/tps/test_bug556509.js
new file mode 100644
index 0000000000..3a46c3c23e
--- /dev/null
+++ b/services/sync/tests/tps/test_bug556509.js
@@ -0,0 +1,32 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["bookmarks"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = { phase1: "profile1", phase2: "profile2" };
+
+// the initial list of bookmarks to add to the browser
+var bookmarks_initial = {
+ menu: [{ folder: "testfolder", description: "it's just me, a test folder" }],
+ "menu/testfolder": [{ uri: "http://www.mozilla.com", title: "Mozilla" }],
+};
+
+/*
+ * Test phases
+ */
+
+// Add a bookmark folder which has a description, and sync.
+Phase("phase1", [
+ [Bookmarks.add, bookmarks_initial],
+ [Bookmarks.verify, bookmarks_initial],
+ [Sync],
+]);
+
+// Sync to profile2 and verify that the bookmark folder is created, along
+// with its description.
+Phase("phase2", [[Sync], [Bookmarks.verify, bookmarks_initial]]);
diff --git a/services/sync/tests/tps/test_bug562515.js b/services/sync/tests/tps/test_bug562515.js
new file mode 100644
index 0000000000..3a1a6cbee2
--- /dev/null
+++ b/services/sync/tests/tps/test_bug562515.js
@@ -0,0 +1,90 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["bookmarks"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = {
+ phase1: "profile1",
+ phase2: "profile2",
+ phase3: "profile1",
+ phase4: "profile2",
+};
+
+/*
+ * Bookmark lists
+ */
+
+// the initial list of bookmarks to add to the browser
+var bookmarks_initial = {
+ menu: [
+ {
+ uri: "http://www.google.com",
+ tags: ["google", "computers", "internet", "www"],
+ },
+ {
+ uri: "http://bugzilla.mozilla.org/show_bug.cgi?id=%s",
+ title: "Bugzilla",
+ keyword: "bz",
+ },
+ { folder: "foldera" },
+ { uri: "http://www.mozilla.com" },
+ { separator: true },
+ { folder: "folderb" },
+ ],
+ "menu/foldera": [
+ { uri: "http://www.yahoo.com", title: "testing Yahoo" },
+ {
+ uri: "http://www.cnn.com",
+ description: "This is a description of the site a at www.cnn.com",
+ },
+ ],
+ "menu/folderb": [{ uri: "http://www.apple.com", tags: ["apple", "mac"] }],
+ toolbar: [
+ {
+ uri: "place:queryType=0&sort=8&maxResults=10&beginTimeRef=1&beginTime=0",
+ title: "Visited Today",
+ },
+ ],
+};
+
+// a list of bookmarks to delete during a 'delete' action
+var bookmarks_to_delete = {
+ menu: [
+ {
+ uri: "http://www.google.com",
+ tags: ["google", "computers", "internet", "www"],
+ },
+ ],
+ "menu/foldera": [{ uri: "http://www.yahoo.com", title: "testing Yahoo" }],
+};
+
+/*
+ * Test phases
+ */
+
+// add bookmarks to profile1 and sync
+Phase("phase1", [
+ [Bookmarks.add, bookmarks_initial],
+ [Bookmarks.verify, bookmarks_initial],
+ [Sync],
+]);
+
+// sync to profile2 and verify that the bookmarks are present
+Phase("phase2", [[Sync], [Bookmarks.verify, bookmarks_initial]]);
+
+// delete some bookmarks from profile1, then sync with "wipe-client"
+// set; finally, verify that the deleted bookmarks were restored.
+Phase("phase3", [
+ [Bookmarks.delete, bookmarks_to_delete],
+ [Bookmarks.verifyNot, bookmarks_to_delete],
+ [Sync, SYNC_WIPE_CLIENT],
+ [Bookmarks.verify, bookmarks_initial],
+]);
+
+// sync profile2 again, verify no bookmarks have been deleted
+Phase("phase4", [[Sync], [Bookmarks.verify, bookmarks_initial]]);
diff --git a/services/sync/tests/tps/test_bug575423.js b/services/sync/tests/tps/test_bug575423.js
new file mode 100644
index 0000000000..53f46db5b7
--- /dev/null
+++ b/services/sync/tests/tps/test_bug575423.js
@@ -0,0 +1,67 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["history"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = { phase1: "profile1", phase2: "profile2" };
+
+/*
+ * History data
+ */
+
+// the history data to add to the browser
+var history1 = [
+ {
+ uri: "http://www.google.com/",
+ title: "Google",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 2, date: -1 },
+ ],
+ },
+ {
+ uri: "http://www.cnn.com/",
+ title: "CNN",
+ visits: [
+ { type: 1, date: -1 },
+ { type: 2, date: -36 },
+ ],
+ },
+];
+
+// additional history data to add to the browser
+var history2 = [
+ {
+ uri: "http://www.mozilla.com/",
+ title: "Mozilla",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 2, date: -36 },
+ ],
+ },
+ {
+ uri: "http://www.google.com/language_tools?hl=en",
+ title: "Language Tools",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 2, date: -40 },
+ ],
+ },
+];
+
+/*
+ * Test phases
+ */
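+// Add some history and sync, then add more history and sync again; profile2
+// then verifies that the entries added after the first sync made it across.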
+Phase("phase1", [
+ [History.add, history1],
+ [Sync],
+ [History.add, history2],
+ [Sync],
+]);
+
+Phase("phase2", [[Sync], [History.verify, history2]]);
diff --git a/services/sync/tests/tps/test_client_wipe.js b/services/sync/tests/tps/test_client_wipe.js
new file mode 100644
index 0000000000..e0fb7a97ac
--- /dev/null
+++ b/services/sync/tests/tps/test_client_wipe.js
@@ -0,0 +1,142 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = { phase1: "profile1", phase2: "profile2", phase3: "profile1" };
+
+/*
+ * Bookmark lists
+ */
+
+// the initial list of bookmarks to add to the browser
+var bookmarks_initial = {
+ toolbar: [
+ { uri: "http://www.google.com", title: "Google" },
+ {
+ uri: "http://www.cnn.com",
+ title: "CNN",
+ changes: {
+ position: "Google",
+ },
+ },
+ { uri: "http://www.mozilla.com", title: "Mozilla" },
+ {
+ uri: "http://www.firefox.com",
+ title: "Firefox",
+ changes: {
+ position: "Mozilla",
+ },
+ },
+ ],
+};
+
+var bookmarks_after_move = {
+ toolbar: [
+ { uri: "http://www.cnn.com", title: "CNN" },
+ { uri: "http://www.google.com", title: "Google" },
+ { uri: "http://www.firefox.com", title: "Firefox" },
+ { uri: "http://www.mozilla.com", title: "Mozilla" },
+ ],
+};
+
+/*
+ * Password data
+ */
+
+// Initial password data
+var passwords_initial = [
+ {
+ hostname: "http://www.example.com",
+ submitURL: "http://login.example.com",
+ username: "joe",
+ password: "secret",
+ usernameField: "uname",
+ passwordField: "pword",
+ changes: {
+ password: "SeCrEt$$$",
+ },
+ },
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "jack",
+ password: "secretlogin",
+ },
+];
+
+// Passwords after the first modify action has been performed
+var passwords_after_change = [
+ {
+ hostname: "http://www.example.com",
+ submitURL: "http://login.example.com",
+ username: "joe",
+ password: "SeCrEt$$$",
+ usernameField: "uname",
+ passwordField: "pword",
+ changes: {
+ username: "james",
+ },
+ },
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "jack",
+ password: "secretlogin",
+ },
+];
+
+/*
+ * Prefs to use in the test
+ */
+var prefs1 = [
+ { name: "browser.startup.homepage", value: "http://www.getfirefox.com" },
+ { name: "browser.urlbar.maxRichResults", value: 20 },
+ { name: "privacy.clearOnShutdown.siteSettings", value: true },
+];
+
+var prefs2 = [
+ { name: "browser.startup.homepage", value: "http://www.mozilla.com" },
+ { name: "browser.urlbar.maxRichResults", value: 18 },
+ { name: "privacy.clearOnShutdown.siteSettings", value: false },
+];
+
+/*
+ * Test phases
+ */
+
+// Add prefs, passwords and bookmarks to profile1 and sync.
+Phase("phase1", [
+ [Passwords.add, passwords_initial],
+ [Bookmarks.add, bookmarks_initial],
+ [Prefs.modify, prefs1],
+ [Prefs.verify, prefs1],
+ [Sync],
+]);
+
+// Sync profile2 and verify the same prefs, passwords and bookmarks are present.
+Phase("phase2", [
+ [Sync],
+ [Prefs.verify, prefs1],
+ [Passwords.verify, passwords_initial],
+ [Bookmarks.verify, bookmarks_initial],
+]);
+
+// Using profile1, change some prefs, bookmarks and passwords, then do another
+// sync with wipe-client. Verify that the server's data is restored and the
+// recent local changes are discarded.
+Phase("phase3", [
+ [Prefs.modify, prefs2],
+ [Passwords.modify, passwords_initial],
+ [Bookmarks.modify, bookmarks_initial],
+ [Prefs.verify, prefs2],
+ [Passwords.verify, passwords_after_change],
+ [Bookmarks.verify, bookmarks_after_move],
+ [Sync, SYNC_WIPE_CLIENT],
+ [Prefs.verify, prefs1],
+ [Passwords.verify, passwords_initial],
+ [Bookmarks.verify, bookmarks_initial],
+]);
diff --git a/services/sync/tests/tps/test_creditcards.js b/services/sync/tests/tps/test_creditcards.js
new file mode 100644
index 0000000000..fea2b8a541
--- /dev/null
+++ b/services/sync/tests/tps/test_creditcards.js
@@ -0,0 +1,62 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/* global Services */
+Services.prefs.setBoolPref("services.sync.engine.creditcards", true);
+
+EnableEngines(["creditcards"]);
+
+var phases = {
+ phase1: "profile1",
+ phase2: "profile2",
+ phase3: "profile1",
+};
+
+const cc1 = [
+ {
+ "cc-name": "John Doe",
+ "cc-number": "4716179744040592",
+ "cc-exp-month": 4,
+ "cc-exp-year": 2050,
+ "unknown-1": "an unknown field from another client",
+ changes: {
+ "cc-exp-year": 2051,
+ },
+ },
+];
+
+const cc1_after = [
+ {
+ "cc-name": "John Doe",
+ "cc-number": "4716179744040592",
+ "cc-exp-month": 4,
+ "cc-exp-year": 2051,
+ "unknown-1": "an unknown field from another client",
+ },
+];
+
+const cc2 = [
+ {
+ "cc-name": "Timothy Berners-Lee",
+ "cc-number": "2221000374457678",
+ "cc-exp-month": 12,
+ "cc-exp-year": 2050,
+ },
+];
+
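+// profile1 adds a card and syncs; profile2 verifies it (including the unknown
+// field, which must round-trip), bumps the expiry year, adds a second card,
+// and syncs; profile1 then verifies the updated and newly added cards.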
+Phase("phase1", [[CreditCards.add, cc1], [Sync]]);
+
+Phase("phase2", [
+ [Sync],
+ [CreditCards.verify, cc1],
+ [CreditCards.modify, cc1],
+ [CreditCards.add, cc2],
+ [Sync],
+]);
+
+Phase("phase3", [
+ [Sync],
+ [CreditCards.verifyNot, cc1],
+ [CreditCards.verify, cc1_after],
+ [CreditCards.verify, cc2],
+]);
diff --git a/services/sync/tests/tps/test_existing_bookmarks.js b/services/sync/tests/tps/test_existing_bookmarks.js
new file mode 100644
index 0000000000..cde41a96e6
--- /dev/null
+++ b/services/sync/tests/tps/test_existing_bookmarks.js
@@ -0,0 +1,80 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["bookmarks"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = {
+ phase1: "profile1",
+ phase2: "profile2",
+ phase3: "profile2",
+ phase4: "profile1",
+};
+
+/*
+ * Bookmark lists
+ */
+var bookmarks_initial = {
+ menu: [
+ {
+ uri: "http://www.google.com",
+ title: "Google",
+ changes: {
+ title: "google",
+ },
+ },
+ {
+ uri: "http://bugzilla.mozilla.org/show_bug.cgi?id=%s",
+ title: "Bugzilla",
+ },
+ { uri: "http://www.mozilla.com" },
+ {
+ uri: "http://www.cnn.com",
+ description: "This is a description of the site a at www.cnn.com",
+ changes: {
+ description: "Global news",
+ },
+ },
+ ],
+};
+
+var bookmarks_after = {
+ menu: [
+ { uri: "http://www.google.com", title: "google" },
+ {
+ uri: "http://bugzilla.mozilla.org/show_bug.cgi?id=%s",
+ title: "Bugzilla",
+ },
+ { uri: "http://www.mozilla.com" },
+ { uri: "http://www.cnn.com", description: "Global news" },
+ ],
+};
+
+/*
+ * Test phases
+ */
+
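+// Both profiles start with the same locally added bookmarks; profile2 then
+// modifies them and syncs, and profile1 verifies the modifications.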
+Phase("phase1", [
+ [Bookmarks.add, bookmarks_initial],
+ [Bookmarks.verify, bookmarks_initial],
+ [Sync],
+]);
+
+Phase("phase2", [
+ [Bookmarks.add, bookmarks_initial],
+ [Bookmarks.verify, bookmarks_initial],
+ [Sync],
+]);
+
+Phase("phase3", [
+ [Bookmarks.verify, bookmarks_initial],
+ [Bookmarks.modify, bookmarks_initial],
+ [Bookmarks.verify, bookmarks_after],
+ [Sync],
+]);
+
+Phase("phase4", [[Sync], [Bookmarks.verify, bookmarks_after]]);
diff --git a/services/sync/tests/tps/test_extstorage.js b/services/sync/tests/tps/test_extstorage.js
new file mode 100644
index 0000000000..cf25187bc3
--- /dev/null
+++ b/services/sync/tests/tps/test_extstorage.js
@@ -0,0 +1,154 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["addons"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = {
+ phase1: "profile1",
+ phase2: "profile2",
+ phase3: "profile1",
+ phase4: "profile2",
+ phase5: "profile1",
+};
+
+/*
+ * Test phases
+ */
+
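+// profile1 seeds two keys for ext-1 and syncs. profile2 overwrites key-2 and
+// syncs. profile1 confirms profile2's value won, sets a new local value, but
+// exits without syncing it; profile2 therefore still sees its own value and
+// overwrites key-2 once more. Finally profile1, whose last change was never
+// uploaded, syncs and verifies that profile2's latest value wins.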
+Phase("phase1", [
+ [
+ ExtStorage.set,
+ "ext-1",
+ {
+ "key-1": {
+ sub_key_1: "value 1",
+ sub_key_2: "value 2",
+ },
+ "key-2": {
+ sk_1: "v1",
+ sk_2: "v2",
+ },
+ },
+ ],
+ [
+ ExtStorage.verify,
+ "ext-1",
+ null,
+ {
+ "key-1": {
+ sub_key_1: "value 1",
+ sub_key_2: "value 2",
+ },
+ "key-2": {
+ sk_1: "v1",
+ sk_2: "v2",
+ },
+ },
+ ],
+ [Sync],
+]);
+
+Phase("phase2", [
+ [Sync],
+ [
+ ExtStorage.set,
+ "ext-1",
+ {
+ "key-2": "value from profile 2",
+ },
+ ],
+ [
+ ExtStorage.verify,
+ "ext-1",
+ null,
+ {
+ "key-1": {
+ sub_key_1: "value 1",
+ sub_key_2: "value 2",
+ },
+ "key-2": "value from profile 2",
+ },
+ ],
+ [Sync],
+]);
+
+Phase("phase3", [
+ [Sync],
+ [
+ ExtStorage.verify,
+ "ext-1",
+ null,
+ {
+ "key-1": {
+ sub_key_1: "value 1",
+ sub_key_2: "value 2",
+ },
+ "key-2": "value from profile 2",
+ },
+ ],
+ [
+ ExtStorage.set,
+ "ext-1",
+ {
+ "key-2": "value from profile 1",
+ },
+ ],
+ // exit without syncing.
+]);
+
+Phase("phase4", [
+ [Sync],
+ [
+ ExtStorage.verify,
+ "ext-1",
+ null,
+ {
+ "key-1": {
+ sub_key_1: "value 1",
+ sub_key_2: "value 2",
+ },
+ "key-2": "value from profile 2",
+ },
+ ],
+ [
+ ExtStorage.set,
+ "ext-1",
+ {
+ "key-2": "second value from profile 2",
+ },
+ ],
+ [Sync],
+]);
+
+Phase("phase5", [
+ [
+ ExtStorage.verify,
+ "ext-1",
+ null,
+ {
+ "key-1": {
+ sub_key_1: "value 1",
+ sub_key_2: "value 2",
+ },
+ "key-2": "value from profile 1",
+ },
+ ],
+ [Sync],
+ [
+ ExtStorage.verify,
+ "ext-1",
+ null,
+ {
+ "key-1": {
+ sub_key_1: "value 1",
+ sub_key_2: "value 2",
+ },
+ "key-2": "second value from profile 2",
+ },
+ ],
+]);
diff --git a/services/sync/tests/tps/test_formdata.js b/services/sync/tests/tps/test_formdata.js
new file mode 100644
index 0000000000..764b12342f
--- /dev/null
+++ b/services/sync/tests/tps/test_formdata.js
@@ -0,0 +1,63 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["forms"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = {
+ phase1: "profile1",
+ phase2: "profile2",
+ phase3: "profile1",
+ phase4: "profile2",
+};
+
+/*
+ * Form data asset lists: these define form values that are used in the tests.
+ */
+
+var formdata1 = [
+ { fieldname: "testing", value: "success", date: -1 },
+ { fieldname: "testing", value: "failure", date: -2 },
+ { fieldname: "username", value: "joe" },
+];
+
+var formdata2 = [
+ { fieldname: "testing", value: "success", date: -1 },
+ { fieldname: "username", value: "joe" },
+];
+
+var formdata_delete = [{ fieldname: "testing", value: "failure" }];
+
+var formdata_new = [{ fieldname: "new-field", value: "new-value" }];
+/*
+ * Test phases
+ */
+
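+// profile1 adds form data and syncs; profile2 verifies it; profile1 then
+// deletes one entry, adds a new one, and syncs; profile2 verifies both the
+// deletion and the addition.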
+Phase("phase1", [
+ [Formdata.add, formdata1],
+ [Formdata.verify, formdata1],
+ [Sync],
+]);
+
+Phase("phase2", [[Sync], [Formdata.verify, formdata1]]);
+
+Phase("phase3", [
+ [Sync],
+ [Formdata.delete, formdata_delete],
+ [Formdata.verifyNot, formdata_delete],
+ [Formdata.verify, formdata2],
+ // add new data after the first Sync, ensuring the tracker works.
+ [Formdata.add, formdata_new],
+ [Sync],
+]);
+
+Phase("phase4", [
+ [Sync],
+ [Formdata.verify, formdata2],
+ [Formdata.verify, formdata_new],
+ [Formdata.verifyNot, formdata_delete],
+]);
diff --git a/services/sync/tests/tps/test_history.js b/services/sync/tests/tps/test_history.js
new file mode 100644
index 0000000000..e1df78c762
--- /dev/null
+++ b/services/sync/tests/tps/test_history.js
@@ -0,0 +1,129 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["history"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = { phase1: "profile1", phase2: "profile2" };
+
+/*
+ * History asset lists: these define history entries that are used during
+ * the test
+ */
+
+// the initial list of history items to add to the browser
+var history1 = [
+ {
+ uri: "http://www.google.com/",
+ title: "Google",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 2, date: -1 },
+ ],
+ },
+ {
+ uri: "http://www.cnn.com/",
+ title: "CNN",
+ visits: [
+ { type: 1, date: -1 },
+ { type: 2, date: -36 },
+ ],
+ },
+ {
+ uri: "http://www.google.com/language_tools?hl=en",
+ title: "Language Tools",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 2, date: -40 },
+ ],
+ },
+ {
+ uri: "http://www.mozilla.com/",
+ title: "Mozilla",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 1, date: -1 },
+ { type: 1, date: -20 },
+ { type: 2, date: -36 },
+ ],
+ },
+];
+
+// a list of items to delete from the history
+var history_to_delete = [
+ { uri: "http://www.cnn.com/" },
+ { begin: -24, end: -1 },
+ { host: "www.google.com" },
+];
+
+// a list which reflects items that should be in the history after
+// the above items are deleted
+var history2 = [
+ {
+ uri: "http://www.mozilla.com/",
+ title: "Mozilla",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 2, date: -36 },
+ ],
+ },
+];
+
+// a list which includes history entries that should not be present
+// after deletion of the history_to_delete entries
+var history_not = [
+ {
+ uri: "http://www.google.com/",
+ title: "Google",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 2, date: -1 },
+ ],
+ },
+ {
+ uri: "http://www.cnn.com/",
+ title: "CNN",
+ visits: [
+ { type: 1, date: -1 },
+ { type: 2, date: -36 },
+ ],
+ },
+ {
+ uri: "http://www.google.com/language_tools?hl=en",
+ title: "Language Tools",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 2, date: -40 },
+ ],
+ },
+ {
+ uri: "http://www.mozilla.com/",
+ title: "Mozilla",
+ visits: [
+ { type: 1, date: -1 },
+ { type: 1, date: -20 },
+ ],
+ },
+];
+
+/*
+ * Test phases
+ * Note: there is no test phase in which deleted history entries are
+ * synced to other clients. This functionality is not supported by
+ * Sync; see bug 446517.
+ */
+
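+// profile1 adds the history entries and syncs; profile2 verifies them, then
+// deletes some locally and verifies the expected remainder.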
+Phase("phase1", [[History.add, history1], [Sync]]);
+
+Phase("phase2", [
+ [Sync],
+ [History.verify, history1],
+ [History.delete, history_to_delete],
+ [History.verify, history2],
+ [History.verifyNot, history_not],
+ [Sync],
+]);
diff --git a/services/sync/tests/tps/test_history_collision.js b/services/sync/tests/tps/test_history_collision.js
new file mode 100644
index 0000000000..ea428d1051
--- /dev/null
+++ b/services/sync/tests/tps/test_history_collision.js
@@ -0,0 +1,98 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["history"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = {
+ phase1: "profile1",
+ phase2: "profile2",
+ phase3: "profile1",
+ phase4: "profile2",
+};
+
+/*
+ * History lists
+ */
+
+// the initial list of history to add to the browser
+var history1 = [
+ {
+ uri: "http://www.google.com/",
+ title: "Google",
+ visits: [{ type: 1, date: 0 }],
+ },
+ {
+ uri: "http://www.cnn.com/",
+ title: "CNN",
+ visits: [
+ { type: 1, date: -1 },
+ { type: 2, date: -36 },
+ ],
+ },
+ {
+ uri: "http://www.mozilla.com/",
+ title: "Mozilla",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 2, date: -36 },
+ ],
+ },
+];
+
+// the history to delete
+var history_to_delete = [
+ { uri: "http://www.cnn.com/", title: "CNN" },
+ { begin: -36, end: -1 },
+];
+
+var history_not = [
+ {
+ uri: "http://www.cnn.com/",
+ title: "CNN",
+ visits: [
+ { type: 1, date: -1 },
+ { type: 2, date: -36 },
+ ],
+ },
+];
+
+var history_after_delete = [
+ {
+ uri: "http://www.google.com/",
+ title: "Google",
+ visits: [{ type: 1, date: 0 }],
+ },
+ {
+ uri: "http://www.mozilla.com/",
+ title: "Mozilla",
+ visits: [{ type: 1, date: 0 }],
+ },
+];
+
+/*
+ * Test phases
+ */
+
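+// Both profiles add the same history; profile2 syncs with wipe-remote so its
+// copy replaces the server data. profile1 then deletes some entries and
+// syncs, and profile2 verifies the deletions.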
+Phase("phase1", [[History.add, history1], [Sync]]);
+
+Phase("phase2", [[Sync], [History.add, history1], [Sync, SYNC_WIPE_REMOTE]]);
+
+Phase("phase3", [
+ [Sync],
+ [History.verify, history1],
+ [History.delete, history_to_delete],
+ [History.verify, history_after_delete],
+ [History.verifyNot, history_not],
+ [Sync],
+]);
+
+Phase("phase4", [
+ [Sync],
+ [History.verify, history_after_delete],
+ [History.verifyNot, history_not],
+]);
diff --git a/services/sync/tests/tps/test_passwords.js b/services/sync/tests/tps/test_passwords.js
new file mode 100644
index 0000000000..2dfd7de8dd
--- /dev/null
+++ b/services/sync/tests/tps/test_passwords.js
@@ -0,0 +1,119 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["passwords"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = {
+ phase1: "profile1",
+ phase2: "profile2",
+ phase3: "profile1",
+ phase4: "profile2",
+};
+
+/*
+ * Password asset lists: these define password entries that are used during
+ * the test
+ */
+
+// initial password list to be loaded into the browser
+var passwords_initial = [
+ {
+ hostname: "http://www.example.com",
+ submitURL: "http://login.example.com",
+ username: "joe",
+ password: "SeCrEt123",
+ usernameField: "uname",
+ passwordField: "pword",
+ changes: {
+ password: "zippity-do-dah",
+ },
+ },
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "joe",
+ password: "secretlogin",
+ },
+];
+
+// expected state of passwords after the changes in the above list are applied
+var passwords_after_first_update = [
+ {
+ hostname: "http://www.example.com",
+ submitURL: "http://login.example.com",
+ username: "joe",
+ password: "zippity-do-dah",
+ usernameField: "uname",
+ passwordField: "pword",
+ },
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "joe",
+ password: "secretlogin",
+ },
+];
+
+var passwords_to_delete = [
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "joe",
+ password: "secretlogin",
+ },
+];
+
+var passwords_absent = [
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "joe",
+ password: "secretlogin",
+ },
+];
+
+// expected state of passwords after the delete operation
+var passwords_after_second_update = [
+ {
+ hostname: "http://www.example.com",
+ submitURL: "http://login.example.com",
+ username: "joe",
+ password: "zippity-do-dah",
+ usernameField: "uname",
+ passwordField: "pword",
+ },
+];
+
+/*
+ * Test phases
+ */
+
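+// profile1 adds the passwords and syncs; profile2 changes joe's form-login
+// password and syncs; profile1 deletes the realm login and syncs; profile2
+// then verifies the deletion.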
+Phase("phase1", [[Passwords.add, passwords_initial], [Sync]]);
+
+Phase("phase2", [
+ [Sync],
+ [Passwords.verify, passwords_initial],
+ [Passwords.modify, passwords_initial],
+ [Passwords.verify, passwords_after_first_update],
+ [Sync],
+]);
+
+Phase("phase3", [
+ [Sync],
+ [Passwords.verify, passwords_after_first_update],
+ [Passwords.delete, passwords_to_delete],
+ [Passwords.verify, passwords_after_second_update],
+ [Passwords.verifyNot, passwords_absent],
+ [Sync],
+]);
+
+Phase("phase4", [
+ [Sync],
+ [Passwords.verify, passwords_after_second_update],
+ [Passwords.verifyNot, passwords_absent],
+]);
diff --git a/services/sync/tests/tps/test_prefs.js b/services/sync/tests/tps/test_prefs.js
new file mode 100644
index 0000000000..3e26e947dd
--- /dev/null
+++ b/services/sync/tests/tps/test_prefs.js
@@ -0,0 +1,35 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["prefs"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = { phase1: "profile1", phase2: "profile2", phase3: "profile1" };
+
+var prefs1 = [
+ { name: "browser.startup.homepage", value: "http://www.getfirefox.com" },
+ { name: "browser.urlbar.maxRichResults", value: 20 },
+ { name: "privacy.clearOnShutdown.siteSettings", value: true },
+];
+
+var prefs2 = [
+ { name: "browser.startup.homepage", value: "http://www.mozilla.com" },
+ { name: "browser.urlbar.maxRichResults", value: 18 },
+ { name: "privacy.clearOnShutdown.siteSettings", value: false },
+];
+
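+// profile1 sets the prefs and syncs; profile2 verifies them, sets new values,
+// and syncs; profile1 then verifies profile2's values.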
+Phase("phase1", [[Prefs.modify, prefs1], [Prefs.verify, prefs1], [Sync]]);
+
+Phase("phase2", [
+ [Sync],
+ [Prefs.verify, prefs1],
+ [Prefs.modify, prefs2],
+ [Prefs.verify, prefs2],
+ [Sync],
+]);
+
+Phase("phase3", [[Sync], [Prefs.verify, prefs2]]);
diff --git a/services/sync/tests/tps/test_privbrw_passwords.js b/services/sync/tests/tps/test_privbrw_passwords.js
new file mode 100644
index 0000000000..dcde6c02b8
--- /dev/null
+++ b/services/sync/tests/tps/test_privbrw_passwords.js
@@ -0,0 +1,105 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in strict JSON format, as it will get parsed by the Python
+ * testrunner (no single quotes, extra commas, etc.).
+ */
+EnableEngines(["passwords"]);
+
+var phases = {
+ phase1: "profile1",
+ phase2: "profile2",
+ phase3: "profile1",
+ phase4: "profile2",
+};
+
+/*
+ * Password data
+ */
+
+// Initial password data
+var passwords_initial = [
+ {
+ hostname: "http://www.example.com",
+ submitURL: "http://login.example.com",
+ username: "joe",
+ password: "secret",
+ usernameField: "uname",
+ passwordField: "pword",
+ changes: {
+ password: "SeCrEt$$$",
+ },
+ },
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "jack",
+ password: "secretlogin",
+ },
+];
+
+// Passwords after the first modify action has been performed
+var passwords_after_first_change = [
+ {
+ hostname: "http://www.example.com",
+ submitURL: "http://login.example.com",
+ username: "joe",
+ password: "SeCrEt$$$",
+ usernameField: "uname",
+ passwordField: "pword",
+ changes: {
+ username: "james",
+ },
+ },
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "jack",
+ password: "secretlogin",
+ },
+];
+
+// Passwords after the second modify action has been performed
+var passwords_after_second_change = [
+ {
+ hostname: "http://www.example.com",
+ submitURL: "http://login.example.com",
+ username: "james",
+ password: "SeCrEt$$$",
+ usernameField: "uname",
+ passwordField: "pword",
+ },
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "jack",
+ password: "secretlogin",
+ },
+];
+
+/*
+ * Test phases
+ */
+
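+// profile1 adds the passwords and syncs; profile2 modifies and syncs; profile1
+// then makes a second modification while a private window is open, which
+// should still sync; profile2 verifies the result.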
+Phase("phase1", [[Passwords.add, passwords_initial], [Sync]]);
+
+Phase("phase2", [
+ [Sync],
+ [Passwords.verify, passwords_initial],
+ [Passwords.modify, passwords_initial],
+ [Passwords.verify, passwords_after_first_change],
+ [Sync],
+]);
+
+Phase("phase3", [
+ [Sync],
+ [Windows.add, { private: true }],
+ [Passwords.verify, passwords_after_first_change],
+ [Passwords.modify, passwords_after_first_change],
+ [Passwords.verify, passwords_after_second_change],
+ [Sync],
+]);
+
+Phase("phase4", [[Sync], [Passwords.verify, passwords_after_second_change]]);
diff --git a/services/sync/tests/tps/test_privbrw_tabs.js b/services/sync/tests/tps/test_privbrw_tabs.js
new file mode 100644
index 0000000000..c7c877677f
--- /dev/null
+++ b/services/sync/tests/tps/test_privbrw_tabs.js
@@ -0,0 +1,86 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in strict JSON format, as it will get parsed by the Python
+ * testrunner (no single quotes, extra commas, etc.).
+ */
+EnableEngines(["tabs"]);
+
+var phases = {
+ phase1: "profile1",
+ phase2: "profile2",
+ phase3: "profile1",
+ phase4: "profile2",
+};
+
+/*
+ * Tabs data
+ */
+
+var tabs1 = [
+ {
+ uri: "data:text/html,<html><head><title>Firefox</title></head><body>Firefox</body></html>",
+ title: "Firefox",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Weave</title></head><body>Weave</body></html>",
+ title: "Weave",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Apple</title></head><body>Apple</body></html>",
+ title: "Apple",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>IRC</title></head><body>IRC</body></html>",
+ title: "IRC",
+ profile: "profile1",
+ },
+];
+
+var tabs2 = [
+ {
+ uri: "data:text/html,<html><head><title>Tinderbox</title></head><body>Tinderbox</body></html>",
+ title: "Tinderbox",
+ profile: "profile2",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Fox</title></head><body>Fox</body></html>",
+ title: "Fox",
+ profile: "profile2",
+ },
+];
+
+var tabs3 = [
+ {
+ uri: "data:text/html,<html><head><title>Jetpack</title></head><body>Jetpack</body></html>",
+ title: "Jetpack",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Selenium</title></head><body>Selenium</body></html>",
+ title: "Selenium",
+ profile: "profile1",
+ },
+];
+
+/*
+ * Test phases
+ */
+
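+// Regular tabs from both profiles sync normally; profile1 then opens tabs3 in
+// a private window, and profile2 verifies those tabs were never synced.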
+Phase("phase1", [[Tabs.add, tabs1], [Sync]]);
+
+Phase("phase2", [[Sync], [Tabs.verify, tabs1], [Tabs.add, tabs2], [Sync]]);
+
+Phase("phase3", [
+ [Sync],
+ [Windows.add, { private: true }],
+ [Tabs.add, tabs3],
+ [Sync],
+]);
+
+Phase("phase4", [[Sync], [Tabs.verifyNot, tabs3]]);
diff --git a/services/sync/tests/tps/test_special_tabs.js b/services/sync/tests/tps/test_special_tabs.js
new file mode 100644
index 0000000000..9e1eae88e3
--- /dev/null
+++ b/services/sync/tests/tps/test_special_tabs.js
@@ -0,0 +1,63 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// Bug 532173 - Don't sync tabs like about:*, weave firstrun, etc.
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in strict JSON format, as it will get parsed by the Python
+ * testrunner (no single quotes, extra commas, etc.).
+ */
+EnableEngines(["tabs"]);
+
+var phases = { phase1: "profile1", phase2: "profile2" };
+
+var tabs1 = [
+ {
+ uri: "data:text/html,<html><head><title>Firefox</title></head><body>Firefox</body></html>",
+ title: "Firefox",
+ profile: "profile1",
+ },
+ { uri: "about:robots", title: "About", profile: "profile1" },
+ { uri: "about:credits", title: "Credits", profile: "profile1" },
+ {
+ uri: "data:text/html,<html><head><title>Mozilla</title></head><body>Mozilla</body></html>",
+ title: "Mozilla",
+ profile: "profile1",
+ },
+ {
+ uri: "http://www.mozilla.com/en-US/firefox/sync/firstrun.html",
+ title: "Firstrun",
+ profile: "profile1",
+ },
+];
+
+var tabs2 = [
+ {
+ uri: "data:text/html,<html><head><title>Firefox</title></head><body>Firefox</body></html>",
+ title: "Firefox",
+ profile: "profile1",
+ },
+ {
+ uri: "data:text/html,<html><head><title>Mozilla</title></head><body>Mozilla</body></html>",
+ title: "Mozilla",
+ profile: "profile1",
+ },
+];
+
+var tabs3 = [
+ {
+ uri: "http://www.mozilla.com/en-US/firefox/sync/firstrun.html",
+ title: "Firstrun",
+ profile: "profile1",
+ },
+ { uri: "about:robots", title: "About", profile: "profile1" },
+ { uri: "about:credits", title: "Credits", profile: "profile1" },
+];
+
+/*
+ * Test phases
+ */
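+// profile1 opens a mix of regular, about:, and firstrun tabs; profile2 should
+// receive only the regular tabs.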
+Phase("phase1", [[Tabs.add, tabs1], [Sync]]);
+
+Phase("phase2", [[Sync], [Tabs.verify, tabs2], [Tabs.verifyNot, tabs3]]);
diff --git a/services/sync/tests/tps/test_sync.js b/services/sync/tests/tps/test_sync.js
new file mode 100644
index 0000000000..3d14430c60
--- /dev/null
+++ b/services/sync/tests/tps/test_sync.js
@@ -0,0 +1,403 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in strict JSON format, as it will get parsed by the Python
+ * testrunner (no single quotes, extra commas, etc.).
+ */
+
+var phases = {
+ phase1: "profile1",
+ phase2: "profile2",
+ phase3: "profile1",
+ phase4: "profile2",
+};
+
+/*
+ * Bookmark asset lists: these define bookmarks that are used during the test
+ */
+
+// the initial list of bookmarks to be added to the browser
+var bookmarks_initial = {
+ menu: [
+ {
+ uri: "http://www.google.com",
+ tags: ["google", "computers", "internet", "www"],
+ changes: {
+ title: "Google",
+ tags: ["google", "computers", "misc"],
+ },
+ },
+ {
+ uri: "http://bugzilla.mozilla.org/show_bug.cgi?id=%s",
+ title: "Bugzilla",
+ keyword: "bz",
+ changes: {
+ keyword: "bugzilla",
+ },
+ },
+ { folder: "foldera" },
+ { uri: "http://www.mozilla.com" },
+ { separator: true },
+ { folder: "folderb" },
+ ],
+ "menu/foldera": [
+ {
+ uri: "http://www.yahoo.com",
+ title: "testing Yahoo",
+ changes: {
+ location: "menu/folderb",
+ },
+ },
+ {
+ uri: "http://www.cnn.com",
+ description: "This is a description of the site a at www.cnn.com",
+ changes: {
+ uri: "http://money.cnn.com",
+ description: "new description",
+ },
+ },
+ ],
+ "menu/folderb": [
+ {
+ uri: "http://www.apple.com",
+ tags: ["apple", "mac"],
+ changes: {
+ uri: "http://www.apple.com/iphone/",
+ title: "iPhone",
+ location: "menu",
+ position: "Google",
+ tags: [],
+ },
+ },
+ ],
+ toolbar: [
+ {
+ uri: "place:queryType=0&sort=8&maxResults=10&beginTimeRef=1&beginTime=0",
+ title: "Visited Today",
+ },
+ ],
+};
+
+// the state of bookmarks after the first 'modify' action has been performed
+// on them
+var bookmarks_after_first_modify = {
+ menu: [
+ {
+ uri: "http://www.apple.com/iphone/",
+ title: "iPhone",
+ before: "Google",
+ tags: [],
+ },
+ {
+ uri: "http://www.google.com",
+ title: "Google",
+ tags: ["google", "computers", "misc"],
+ },
+ {
+ uri: "http://bugzilla.mozilla.org/show_bug.cgi?id=%s",
+ title: "Bugzilla",
+ keyword: "bugzilla",
+ },
+ { folder: "foldera" },
+ { uri: "http://www.mozilla.com" },
+ { separator: true },
+ {
+ folder: "folderb",
+ changes: {
+ location: "menu/foldera",
+ folder: "Folder B",
+ description: "folder description",
+ },
+ },
+ ],
+ "menu/foldera": [
+ {
+ uri: "http://money.cnn.com",
+ title: "http://www.cnn.com",
+ description: "new description",
+ },
+ ],
+ "menu/folderb": [{ uri: "http://www.yahoo.com", title: "testing Yahoo" }],
+ toolbar: [
+ {
+ uri: "place:queryType=0&sort=8&maxResults=10&beginTimeRef=1&beginTime=0",
+ title: "Visited Today",
+ },
+ ],
+};
+
+// a list of bookmarks to delete during a 'delete' action
+var bookmarks_to_delete = {
+ menu: [
+ {
+ uri: "http://www.google.com",
+ title: "Google",
+ tags: ["google", "computers", "misc"],
+ },
+ ],
+};
+
+// the state of bookmarks after the second 'modify' action has been performed
+// on them
+var bookmarks_after_second_modify = {
+ menu: [
+ { uri: "http://www.apple.com/iphone/", title: "iPhone" },
+ {
+ uri: "http://bugzilla.mozilla.org/show_bug.cgi?id=%s",
+ title: "Bugzilla",
+ keyword: "bugzilla",
+ },
+ { folder: "foldera" },
+ { uri: "http://www.mozilla.com" },
+ { separator: true },
+ ],
+ "menu/foldera": [
+ {
+ uri: "http://money.cnn.com",
+ title: "http://www.cnn.com",
+ description: "new description",
+ },
+ { folder: "Folder B", description: "folder description" },
+ ],
+ "menu/foldera/Folder B": [
+ { uri: "http://www.yahoo.com", title: "testing Yahoo" },
+ ],
+};
+
+// a list of bookmarks which should not be present after the last
+// 'delete' and 'modify' actions
+var bookmarks_absent = {
+ menu: [
+ { uri: "http://www.google.com", title: "Google" },
+ { folder: "folderb" },
+ { folder: "Folder B" },
+ ],
+};
+
+/*
+ * History asset lists: these define history entries that are used during
+ * the test
+ */
+
+// the initial list of history items to add to the browser
+var history_initial = [
+ {
+ uri: "http://www.google.com/",
+ title: "Google",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 2, date: -1 },
+ ],
+ },
+ {
+ uri: "http://www.cnn.com/",
+ title: "CNN",
+ visits: [
+ { type: 1, date: -1 },
+ { type: 2, date: -36 },
+ ],
+ },
+ {
+ uri: "http://www.google.com/language_tools?hl=en",
+ title: "Language Tools",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 2, date: -40 },
+ ],
+ },
+ {
+ uri: "http://www.mozilla.com/",
+ title: "Mozilla",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 1, date: -1 },
+ { type: 1, date: -20 },
+ { type: 2, date: -36 },
+ ],
+ },
+];
+
+// a list of history entries to delete during a 'delete' action
+var history_to_delete = [
+ { uri: "http://www.cnn.com/" },
+ { begin: -24, end: -1 },
+ { host: "www.google.com" },
+];
+
+// the expected history entries after the first 'delete' action
+var history_after_delete = [
+ {
+ uri: "http://www.mozilla.com/",
+ title: "Mozilla",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 2, date: -36 },
+ ],
+ },
+];
+
+// history entries expected to not exist after a 'delete' action
+var history_absent = [
+ {
+ uri: "http://www.google.com/",
+ title: "Google",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 2, date: -1 },
+ ],
+ },
+ {
+ uri: "http://www.cnn.com/",
+ title: "CNN",
+ visits: [
+ { type: 1, date: -1 },
+ { type: 2, date: -36 },
+ ],
+ },
+ {
+ uri: "http://www.google.com/language_tools?hl=en",
+ title: "Language Tools",
+ visits: [
+ { type: 1, date: 0 },
+ { type: 2, date: -40 },
+ ],
+ },
+ {
+ uri: "http://www.mozilla.com/",
+ title: "Mozilla",
+ visits: [
+ { type: 1, date: -1 },
+ { type: 1, date: -20 },
+ ],
+ },
+];
+
+/*
+ * Password asset lists: these define password entries that are used during
+ * the test
+ */
+
+// the initial list of passwords to add to the browser
+var passwords_initial = [
+ {
+ hostname: "http://www.example.com",
+ submitURL: "http://login.example.com",
+ username: "joe",
+ password: "SeCrEt123",
+ usernameField: "uname",
+ passwordField: "pword",
+ changes: {
+ password: "zippity-do-dah",
+ },
+ },
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "joe",
+ password: "secretlogin",
+ },
+];
+
+// the expected state of passwords after the first 'modify' action
+var passwords_after_first_modify = [
+ {
+ hostname: "http://www.example.com",
+ submitURL: "http://login.example.com",
+ username: "joe",
+ password: "zippity-do-dah",
+ usernameField: "uname",
+ passwordField: "pword",
+ },
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "joe",
+ password: "secretlogin",
+ },
+];
+
+// a list of passwords to delete during a 'delete' action
+var passwords_to_delete = [
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "joe",
+ password: "secretlogin",
+ },
+];
+
+// a list of passwords expected to be absent after 'delete' and 'modify'
+// actions
+var passwords_absent = [
+ {
+ hostname: "http://www.example.com",
+ realm: "login",
+ username: "joe",
+ password: "secretlogin",
+ },
+];
+
+// the expected state of passwords after the second 'modify' action
+var passwords_after_second_modify = [
+ {
+ hostname: "http://www.example.com",
+ submitURL: "http://login.example.com",
+ username: "joe",
+ password: "zippity-do-dah",
+ usernameField: "uname",
+ passwordField: "pword",
+ },
+];
+
+/*
+ * Test phases
+ */
+
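+// A combined bookmarks/passwords/history test: profile1 seeds the data;
+// profile2 verifies it and applies the first round of modifications and
+// deletions; profile1 applies the second round; profile2 verifies the final
+// state.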
+Phase("phase1", [
+ [Bookmarks.add, bookmarks_initial],
+ [Passwords.add, passwords_initial],
+ [History.add, history_initial],
+ [Sync],
+]);
+
+Phase("phase2", [
+ [Sync],
+ [Bookmarks.verify, bookmarks_initial],
+ [Passwords.verify, passwords_initial],
+ [History.verify, history_initial],
+ [Bookmarks.modify, bookmarks_initial],
+ [Passwords.modify, passwords_initial],
+ [History.delete, history_to_delete],
+ [Bookmarks.verify, bookmarks_after_first_modify],
+ [Passwords.verify, passwords_after_first_modify],
+ [History.verify, history_after_delete],
+ [History.verifyNot, history_absent],
+ [Sync],
+]);
+
+Phase("phase3", [
+ [Sync],
+ [Bookmarks.verify, bookmarks_after_first_modify],
+ [Passwords.verify, passwords_after_first_modify],
+ [History.verify, history_after_delete],
+ [Bookmarks.modify, bookmarks_after_first_modify],
+ [Passwords.modify, passwords_after_first_modify],
+ [Bookmarks.delete, bookmarks_to_delete],
+ [Passwords.delete, passwords_to_delete],
+ [Bookmarks.verify, bookmarks_after_second_modify],
+ [Passwords.verify, passwords_after_second_modify],
+ [Bookmarks.verifyNot, bookmarks_absent],
+ [Passwords.verifyNot, passwords_absent],
+ [Sync],
+]);
+
+Phase("phase4", [
+ [Sync],
+ [Bookmarks.verify, bookmarks_after_second_modify],
+ [Passwords.verify, passwords_after_second_modify],
+ [Bookmarks.verifyNot, bookmarks_absent],
+ [Passwords.verifyNot, passwords_absent],
+ [History.verifyNot, history_absent],
+]);
diff --git a/services/sync/tests/tps/test_tabs.js b/services/sync/tests/tps/test_tabs.js
new file mode 100644
index 0000000000..58e8d358cd
--- /dev/null
+++ b/services/sync/tests/tps/test_tabs.js
@@ -0,0 +1,42 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+EnableEngines(["tabs"]);
+
+/*
+ * The list of phases mapped to their corresponding profiles. The object
+ * here must be in JSON format as it will get parsed by the Python
+ * testrunner. It is parsed by the YAML package, so it is relatively flexible.
+ */
+var phases = { phase1: "profile1", phase2: "profile2", phase3: "profile1" };
+
+/*
+ * Tab lists.
+ */
+
+var tabs1 = [
+ { uri: "https://www.mozilla.org/en-US/firefox/", profile: "profile1" },
+ {
+ uri: "https://example.com/",
+ title: "Example Domain",
+ profile: "profile1",
+ },
+];
+
+var tabs2 = [
+ { uri: "https://www.mozilla.org/en-US/contribute/", profile: "profile2" },
+ {
+ uri: "https://example.com/",
+ profile: "profile2",
+ },
+];
+
+/*
+ * Test phases
+ */
+
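+// profile1 opens tabs and syncs; profile2 verifies them, opens its own tabs,
+// and syncs; profile1 then verifies profile2's tabs.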
+Phase("phase1", [[Tabs.add, tabs1], [Sync]]);
+
+Phase("phase2", [[Sync], [Tabs.verify, tabs1], [Tabs.add, tabs2], [Sync]]);
+
+Phase("phase3", [[Sync], [Tabs.verify, tabs2]]);
diff --git a/services/sync/tests/unit/addon1-search.json b/services/sync/tests/unit/addon1-search.json
new file mode 100644
index 0000000000..55f8af8857
--- /dev/null
+++ b/services/sync/tests/unit/addon1-search.json
@@ -0,0 +1,21 @@
+{
+ "next": null,
+ "results": [
+ {
+ "name": "Non-Restartless Test Extension",
+ "type": "extension",
+ "guid": "addon1@tests.mozilla.org",
+ "current_version": {
+ "version": "1.0",
+ "files": [
+ {
+ "platform": "all",
+ "size": 485,
+ "url": "http://127.0.0.1:8888/addon1.xpi"
+ }
+ ]
+ },
+ "last_updated": "2011-09-05T20:42:09Z"
+ }
+ ]
+}
diff --git a/services/sync/tests/unit/bootstrap1-search.json b/services/sync/tests/unit/bootstrap1-search.json
new file mode 100644
index 0000000000..8cd1cf43ed
--- /dev/null
+++ b/services/sync/tests/unit/bootstrap1-search.json
@@ -0,0 +1,21 @@
+{
+ "next": null,
+ "results": [
+ {
+ "name": "Restartless Test Extension",
+ "type": "extension",
+ "guid": "bootstrap1@tests.mozilla.org",
+ "current_version": {
+ "version": "1.0",
+ "files": [
+ {
+ "platform": "all",
+ "size": 485,
+ "url": "http://127.0.0.1:8888/bootstrap1.xpi"
+ }
+ ]
+ },
+ "last_updated": "2011-09-05T20:42:09Z"
+ }
+ ]
+}
diff --git a/services/sync/tests/unit/head_appinfo.js b/services/sync/tests/unit/head_appinfo.js
new file mode 100644
index 0000000000..79379ccea8
--- /dev/null
+++ b/services/sync/tests/unit/head_appinfo.js
@@ -0,0 +1,58 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/* import-globals-from ../../../common/tests/unit/head_helpers.js */
+
+var { XPCOMUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/XPCOMUtils.sys.mjs"
+);
+
+// Create a profile directory up front; many components fail without one.
+do_get_profile();
+
+// Init FormHistoryStartup and pretend we opened a profile.
+var fhs = Cc["@mozilla.org/satchel/form-history-startup;1"].getService(
+ Ci.nsIObserver
+);
+fhs.observe(null, "profile-after-change", null);
+
+// A real app will have some prefs set that xpcshell tests don't.
+Services.prefs.setStringPref(
+ "identity.sync.tokenserver.uri",
+ "http://token-server"
+);
+
+// Make sure to provide the right OS so crypto loads the right binaries
+function getOS() {
+ switch (mozinfo.os) {
+ case "win":
+ return "WINNT";
+ case "mac":
+ return "Darwin";
+ default:
+ return "Linux";
+ }
+}
+
+const { updateAppInfo } = ChromeUtils.importESModule(
+ "resource://testing-common/AppInfo.sys.mjs"
+);
+updateAppInfo({
+ name: "XPCShell",
+ ID: "xpcshell@tests.mozilla.org",
+ version: "1",
+ platformVersion: "",
+ OS: getOS(),
+});
+
+// Register resource aliases. Normally done in SyncComponents.manifest.
+function addResourceAlias() {
+ const resProt = Services.io
+ .getProtocolHandler("resource")
+ .QueryInterface(Ci.nsIResProtocolHandler);
+ for (let s of ["common", "sync", "crypto"]) {
+ let uri = Services.io.newURI("resource://gre/modules/services-" + s + "/");
+ resProt.setSubstitution("services-" + s, uri);
+ }
+}
+addResourceAlias();
diff --git a/services/sync/tests/unit/head_errorhandler_common.js b/services/sync/tests/unit/head_errorhandler_common.js
new file mode 100644
index 0000000000..fcf44ec43b
--- /dev/null
+++ b/services/sync/tests/unit/head_errorhandler_common.js
@@ -0,0 +1,195 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/* import-globals-from head_appinfo.js */
+/* import-globals-from ../../../common/tests/unit/head_helpers.js */
+/* import-globals-from head_helpers.js */
+/* import-globals-from head_http_server.js */
+
+// This file expects Service to be defined in the global scope when EHTestsCommon
+// is used (from service.js).
+/* global Service */
+
+var { Changeset, EngineManager, Store, SyncEngine, Tracker, LegacyTracker } =
+ ChromeUtils.importESModule("resource://services-sync/engines.sys.mjs");
+var {
+ ABORT_SYNC_COMMAND,
+ CLIENT_NOT_CONFIGURED,
+ CREDENTIALS_CHANGED,
+ DEFAULT_DOWNLOAD_BATCH_SIZE,
+ DEFAULT_GUID_FETCH_BATCH_SIZE,
+ DEFAULT_KEYBUNDLE_NAME,
+ DEVICE_TYPE_DESKTOP,
+ DEVICE_TYPE_MOBILE,
+ ENGINE_APPLY_FAIL,
+ ENGINE_BATCH_INTERRUPTED,
+ ENGINE_DOWNLOAD_FAIL,
+ ENGINE_SUCCEEDED,
+ ENGINE_UNKNOWN_FAIL,
+ ENGINE_UPLOAD_FAIL,
+ HMAC_EVENT_INTERVAL,
+ IDLE_OBSERVER_BACK_DELAY,
+ LOGIN_FAILED,
+ LOGIN_FAILED_INVALID_PASSPHRASE,
+ LOGIN_FAILED_LOGIN_REJECTED,
+ LOGIN_FAILED_NETWORK_ERROR,
+ LOGIN_FAILED_NO_PASSPHRASE,
+ LOGIN_FAILED_NO_USERNAME,
+ LOGIN_FAILED_SERVER_ERROR,
+ LOGIN_SUCCEEDED,
+ MASTER_PASSWORD_LOCKED,
+ MASTER_PASSWORD_LOCKED_RETRY_INTERVAL,
+ MAXIMUM_BACKOFF_INTERVAL,
+ MAX_ERROR_COUNT_BEFORE_BACKOFF,
+ MAX_HISTORY_DOWNLOAD,
+ MAX_HISTORY_UPLOAD,
+ METARECORD_DOWNLOAD_FAIL,
+ MINIMUM_BACKOFF_INTERVAL,
+ MULTI_DEVICE_THRESHOLD,
+ NO_SYNC_NODE_FOUND,
+ NO_SYNC_NODE_INTERVAL,
+ OVER_QUOTA,
+ PREFS_BRANCH,
+ RESPONSE_OVER_QUOTA,
+ SCORE_INCREMENT_MEDIUM,
+ SCORE_INCREMENT_SMALL,
+ SCORE_INCREMENT_XLARGE,
+ SCORE_UPDATE_DELAY,
+ SERVER_MAINTENANCE,
+ SINGLE_USER_THRESHOLD,
+ SQLITE_MAX_VARIABLE_NUMBER,
+ STATUS_DISABLED,
+ STATUS_OK,
+ STORAGE_VERSION,
+ SYNC_FAILED,
+ SYNC_FAILED_PARTIAL,
+ SYNC_KEY_DECODED_LENGTH,
+ SYNC_KEY_ENCODED_LENGTH,
+ SYNC_SUCCEEDED,
+ URI_LENGTH_MAX,
+ VERSION_OUT_OF_DATE,
+ WEAVE_VERSION,
+ kFirefoxShuttingDown,
+ kFirstSyncChoiceNotMade,
+ kSyncBackoffNotMet,
+ kSyncMasterPasswordLocked,
+ kSyncNetworkOffline,
+ kSyncNotConfigured,
+ kSyncWeaveDisabled,
+} = ChromeUtils.importESModule("resource://services-sync/constants.sys.mjs");
+var { BulkKeyBundle, SyncKeyBundle } = ChromeUtils.importESModule(
+ "resource://services-sync/keys.sys.mjs"
+);
+
+// Common code for test_errorhandler_{1,2}.js -- pulled out to make it less
+// monolithic and take less time to execute.
+const EHTestsCommon = {
+ service_unavailable(request, response) {
+ let body = "Service Unavailable";
+ response.setStatusLine(request.httpVersion, 503, "Service Unavailable");
+ response.setHeader("Retry-After", "42");
+ response.bodyOutputStream.write(body, body.length);
+ },
+
+ async sync_httpd_setup() {
+ let clientsEngine = Service.clientsEngine;
+ let clientsSyncID = await clientsEngine.resetLocalSyncID();
+ let catapultEngine = Service.engineManager.get("catapult");
+ let catapultSyncID = await catapultEngine.resetLocalSyncID();
+ let global = new ServerWBO("global", {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ engines: {
+ clients: { version: clientsEngine.version, syncID: clientsSyncID },
+ catapult: { version: catapultEngine.version, syncID: catapultSyncID },
+ },
+ });
+ let clientsColl = new ServerCollection({}, true);
+
+ // Tracking info/collections.
+ let collectionsHelper = track_collections_helper();
+ let upd = collectionsHelper.with_updated_collection;
+
+ let handler_401 = httpd_handler(401, "Unauthorized");
+ return httpd_setup({
+ // Normal server behaviour.
+ "/1.1/johndoe/storage/meta/global": upd("meta", global.handler()),
+ "/1.1/johndoe/info/collections": collectionsHelper.handler,
+ "/1.1/johndoe/storage/crypto/keys": upd(
+ "crypto",
+ new ServerWBO("keys").handler()
+ ),
+ "/1.1/johndoe/storage/clients": upd("clients", clientsColl.handler()),
+
+ // Credentials are wrong or node reallocated.
+ "/1.1/janedoe/storage/meta/global": handler_401,
+ "/1.1/janedoe/info/collections": handler_401,
+
+ // Maintenance or overloaded (503 + Retry-After) at info/collections.
+ "/1.1/broken.info/info/collections": EHTestsCommon.service_unavailable,
+
+ // Maintenance or overloaded (503 + Retry-After) at meta/global.
+ "/1.1/broken.meta/storage/meta/global": EHTestsCommon.service_unavailable,
+ "/1.1/broken.meta/info/collections": collectionsHelper.handler,
+
+ // Maintenance or overloaded (503 + Retry-After) at crypto/keys.
+ "/1.1/broken.keys/storage/meta/global": upd("meta", global.handler()),
+ "/1.1/broken.keys/info/collections": collectionsHelper.handler,
+ "/1.1/broken.keys/storage/crypto/keys": EHTestsCommon.service_unavailable,
+
+ // Maintenance or overloaded (503 + Retry-After) at wiping collection.
+ "/1.1/broken.wipe/info/collections": collectionsHelper.handler,
+ "/1.1/broken.wipe/storage/meta/global": upd("meta", global.handler()),
+ "/1.1/broken.wipe/storage/crypto/keys": upd(
+ "crypto",
+ new ServerWBO("keys").handler()
+ ),
+ "/1.1/broken.wipe/storage": EHTestsCommon.service_unavailable,
+ "/1.1/broken.wipe/storage/clients": upd("clients", clientsColl.handler()),
+ "/1.1/broken.wipe/storage/catapult": EHTestsCommon.service_unavailable,
+ });
+ },
+
+ CatapultEngine: (function () {
+ function CatapultEngine() {
+ SyncEngine.call(this, "Catapult", Service);
+ }
+ CatapultEngine.prototype = {
+ exception: null, // tests fill this in
+ async _sync() {
+ if (this.exception) {
+ throw this.exception;
+ }
+ },
+ };
+ Object.setPrototypeOf(CatapultEngine.prototype, SyncEngine.prototype);
+
+ return CatapultEngine;
+ })(),
+
+ async generateCredentialsChangedFailure() {
+ // Make sync fail due to changed credentials. We simply re-encrypt
+ // the keys with a different Sync Key, without changing the local one.
+ let newSyncKeyBundle = new BulkKeyBundle("crypto");
+ await newSyncKeyBundle.generateRandom();
+ let keys = Service.collectionKeys.asWBO();
+ await keys.encrypt(newSyncKeyBundle);
+ return keys.upload(Service.resource(Service.cryptoKeysURL));
+ },
+
+ async setUp(server) {
+ syncTestLogging();
+ await configureIdentity({ username: "johndoe" }, server);
+ return EHTestsCommon.generateAndUploadKeys();
+ },
+
+ async generateAndUploadKeys() {
+ await generateNewKeys(Service.collectionKeys);
+ let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
+ await serverKeys.encrypt(Service.identity.syncKeyBundle);
+ let response = await serverKeys.upload(
+ Service.resource(Service.cryptoKeysURL)
+ );
+ return response.success;
+ },
+};
diff --git a/services/sync/tests/unit/head_helpers.js b/services/sync/tests/unit/head_helpers.js
new file mode 100644
index 0000000000..e79e55e57f
--- /dev/null
+++ b/services/sync/tests/unit/head_helpers.js
@@ -0,0 +1,709 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/* import-globals-from head_appinfo.js */
+/* import-globals-from ../../../common/tests/unit/head_helpers.js */
+/* import-globals-from head_errorhandler_common.js */
+/* import-globals-from head_http_server.js */
+
+// This file expects Service to be defined in the global scope when EHTestsCommon
+// is used (from service.js).
+/* global Service */
+
+var { AddonTestUtils, MockAsyncShutdown } = ChromeUtils.importESModule(
+ "resource://testing-common/AddonTestUtils.sys.mjs"
+);
+var { Async } = ChromeUtils.importESModule(
+ "resource://services-common/async.sys.mjs"
+);
+var { CommonUtils } = ChromeUtils.importESModule(
+ "resource://services-common/utils.sys.mjs"
+);
+var { PlacesTestUtils } = ChromeUtils.importESModule(
+ "resource://testing-common/PlacesTestUtils.sys.mjs"
+);
+var { sinon } = ChromeUtils.importESModule(
+ "resource://testing-common/Sinon.sys.mjs"
+);
+var { SerializableSet, Svc, Utils, getChromeWindow } =
+ ChromeUtils.importESModule("resource://services-sync/util.sys.mjs");
+var { XPCOMUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/XPCOMUtils.sys.mjs"
+);
+var { PlacesUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/PlacesUtils.sys.mjs"
+);
+var { PlacesSyncUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/PlacesSyncUtils.sys.mjs"
+);
+var { ObjectUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/ObjectUtils.sys.mjs"
+);
+var {
+ MockFxaStorageManager,
+ SyncTestingInfrastructure,
+ configureFxAccountIdentity,
+ configureIdentity,
+ encryptPayload,
+ getLoginTelemetryScalar,
+ makeFxAccountsInternalMock,
+ makeIdentityConfig,
+ promiseNamedTimer,
+ promiseZeroTimer,
+ sumHistogram,
+ syncTestLogging,
+ waitForZeroTimer,
+} = ChromeUtils.importESModule(
+ "resource://testing-common/services/sync/utils.sys.mjs"
+);
+ChromeUtils.defineESModuleGetters(this, {
+ AddonManager: "resource://gre/modules/AddonManager.sys.mjs",
+});
+
+add_setup(async function head_setup() {
+ // Initialize logging. This will sometimes be reset by a pref reset,
+ // so it's also called as part of SyncTestingInfrastructure().
+ syncTestLogging();
+ // If a test imports Service, make sure it is initialized first.
+ if (typeof Service !== "undefined") {
+ await Service.promiseInitialized;
+ }
+});
+
+ChromeUtils.defineLazyGetter(this, "SyncPingSchema", function () {
+ let { FileUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/FileUtils.sys.mjs"
+ );
+ let { NetUtil } = ChromeUtils.importESModule(
+ "resource://gre/modules/NetUtil.sys.mjs"
+ );
+ let stream = Cc["@mozilla.org/network/file-input-stream;1"].createInstance(
+ Ci.nsIFileInputStream
+ );
+ let schema;
+ try {
+ let schemaFile = do_get_file("sync_ping_schema.json");
+ stream.init(schemaFile, FileUtils.MODE_RDONLY, FileUtils.PERMS_FILE, 0);
+
+ let bytes = NetUtil.readInputStream(stream, stream.available());
+ schema = JSON.parse(new TextDecoder().decode(bytes));
+ } finally {
+ stream.close();
+ }
+
+  // Allow tests to make whatever engines they want; this shouldn't cause
+  // a validation failure.
+ schema.definitions.engine.properties.name = { type: "string" };
+ return schema;
+});
+
+ChromeUtils.defineLazyGetter(this, "SyncPingValidator", function () {
+ const { JsonSchema } = ChromeUtils.importESModule(
+ "resource://gre/modules/JsonSchema.sys.mjs"
+ );
+ return new JsonSchema.Validator(SyncPingSchema);
+});
+
+// This is needed for loadWebExtensionTestFunctions().
+var gGlobalScope = this;
+
+function ExtensionsTestPath(path) {
+ if (path[0] != "/") {
+ throw Error("Path must begin with '/': " + path);
+ }
+
+ return "../../../../toolkit/mozapps/extensions/test/xpcshell" + path;
+}
+
+function webExtensionsTestPath(path) {
+ if (path[0] != "/") {
+ throw Error("Path must begin with '/': " + path);
+ }
+
+ return "../../../../toolkit/components/extensions/test/xpcshell" + path;
+}
+
+/**
+ * Loads the WebExtension test functions by importing its test file.
+ */
+function loadWebExtensionTestFunctions() {
+ /* import-globals-from ../../../../toolkit/components/extensions/test/xpcshell/head_sync.js */
+ const path = webExtensionsTestPath("/head_sync.js");
+ let file = do_get_file(path);
+ let uri = Services.io.newFileURI(file);
+ Services.scriptloader.loadSubScript(uri.spec, gGlobalScope);
+}
+
+/**
+ * Installs an add-on from an addonInstall
+ *
+ * @param install addonInstall instance to install
+ */
+async function installAddonFromInstall(install) {
+ await install.install();
+
+ Assert.notEqual(null, install.addon);
+ Assert.notEqual(null, install.addon.syncGUID);
+
+ return install.addon;
+}
+
+/**
+ * Convenience function to install an add-on from the extensions unit tests.
+ *
+ * @param file
+ * Add-on file to install.
+ * @param reconciler
+ * addons reconciler, if passed we will wait on the events to be
+ * processed before resolving
+ * @return addon object that was installed
+ */
+async function installAddon(file, reconciler = null) {
+ let install = await AddonManager.getInstallForFile(file);
+ Assert.notEqual(null, install);
+ const addon = await installAddonFromInstall(install);
+ if (reconciler) {
+ await reconciler.queueCaller.promiseCallsComplete();
+ }
+ return addon;
+}
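+
+// Example usage (illustrative; the XPI path is hypothetical):
+//
+//   let addon = await installAddon(
+//     do_get_file("addons/example.xpi"),
+//     reconciler
+//   );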
+
+/**
+ * Convenience function to uninstall an add-on.
+ *
+ * @param addon
+ * Addon instance to uninstall
+ * @param reconciler
+ * addons reconciler, if passed we will wait on the events to be
+ * processed before resolving
+ */
+async function uninstallAddon(addon, reconciler = null) {
+ const uninstallPromise = new Promise(res => {
+ let listener = {
+ onUninstalled(uninstalled) {
+ if (uninstalled.id == addon.id) {
+ AddonManager.removeAddonListener(listener);
+ res(uninstalled);
+ }
+ },
+ };
+ AddonManager.addAddonListener(listener);
+ });
+ addon.uninstall();
+ await uninstallPromise;
+ if (reconciler) {
+ await reconciler.queueCaller.promiseCallsComplete();
+ }
+}
+
+async function generateNewKeys(collectionKeys, collections = null) {
+ let wbo = await collectionKeys.generateNewKeysWBO(collections);
+ let modified = new_timestamp();
+ collectionKeys.setContents(wbo.cleartext, modified);
+}
+
+// Helpers for testing open tabs.
+// These reflect part of the internal structure of TabEngine,
+// and stub part of Service.wm.
+
+function mockShouldSkipWindow(win) {
+ return win.closed || win.mockIsPrivate;
+}
+
+function mockGetTabState(tab) {
+ return tab;
+}
+
+function mockGetWindowEnumerator(urls) {
+ let elements = [];
+
+ const numWindows = 1;
+ for (let w = 0; w < numWindows; ++w) {
+ let tabs = [];
+ let win = {
+ closed: false,
+ mockIsPrivate: false,
+ gBrowser: {
+ tabs,
+ },
+ };
+ elements.push(win);
+
+ let lastAccessed = 2000;
+ for (let url of urls) {
+ tabs.push({
+ linkedBrowser: {
+ currentURI: Services.io.newURI(url),
+ contentTitle: "title",
+ },
+ lastAccessed,
+ });
+ lastAccessed += 1000;
+ }
+ }
+
+ // Always include a closed window and a private window.
+ elements.push({
+ closed: true,
+ mockIsPrivate: false,
+ gBrowser: {
+ tabs: [],
+ },
+ });
+
+ elements.push({
+ closed: false,
+ mockIsPrivate: true,
+ gBrowser: {
+ tabs: [],
+ },
+ });
+
+ return elements.values();
+}
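+
+// Illustrative sketch of wiring these mocks into the tabs engine; the exact
+// properties overridden on the engine are assumptions for illustration only:
+//
+//   let engine = Service.engineManager.get("tabs");
+//   engine.getWindowEnumerator = () =>
+//     mockGetWindowEnumerator(["http://example.com/"]);
+//   engine.shouldSkipWindow = mockShouldSkipWindow;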
+
+// Helper function to get the sync telemetry and add the typically used test
+// engine names to its list of allowed engines.
+function get_sync_test_telemetry() {
+ let { SyncTelemetry } = ChromeUtils.importESModule(
+ "resource://services-sync/telemetry.sys.mjs"
+ );
+ SyncTelemetry.tryRefreshDevices = function () {};
+ let testEngines = ["rotary", "steam", "sterling", "catapult", "nineties"];
+ for (let engineName of testEngines) {
+ SyncTelemetry.allowedEngines.add(engineName);
+ }
+ SyncTelemetry.submissionInterval = -1;
+ return SyncTelemetry;
+}
+
+function assert_valid_ping(record) {
+ // Our JSON validator does not like `undefined` values, even though they will
+ // be skipped when we serialize to JSON.
+ record = JSON.parse(JSON.stringify(record));
+
+  // This is called as the test harness tears down due to shutdown. Such pings
+  // typically have no recorded syncs, and the validator complains about that.
+  // So ignore such records (but only when *both* the reason is shutdown and
+  // there are no syncs - either condition alone might be an actual problem).
+ if (record && (record.why != "shutdown" || !!record.syncs.length)) {
+ const result = SyncPingValidator.validate(record);
+ if (!result.valid) {
+ if (result.errors.length) {
+        // Validation failed - using a simple |deepEqual([], errors)| tends to
+        // truncate the validation errors in the output and doesn't show what
+        // the ping actually was - so be helpful.
+ info("telemetry ping validation failed");
+ info("the ping data is: " + JSON.stringify(record, undefined, 2));
+ info(
+ "the validation failures: " +
+ JSON.stringify(result.errors, undefined, 2)
+ );
+ ok(
+ false,
+ "Sync telemetry ping validation failed - see output above for details"
+ );
+ }
+ }
+ equal(record.version, 1);
+ record.syncs.forEach(p => {
+ lessOrEqual(p.when, Date.now());
+ });
+ }
+}
+
+// Asserts that `ping` is a ping that doesn't contain any failure information
+function assert_success_ping(ping) {
+ ok(!!ping);
+ assert_valid_ping(ping);
+ ping.syncs.forEach(record => {
+ ok(!record.failureReason, JSON.stringify(record.failureReason));
+ equal(undefined, record.status);
+ greater(record.engines.length, 0);
+ for (let e of record.engines) {
+ ok(!e.failureReason);
+ equal(undefined, e.status);
+ if (e.validation) {
+ equal(undefined, e.validation.problems);
+ equal(undefined, e.validation.failureReason);
+ }
+ if (e.outgoing) {
+ for (let o of e.outgoing) {
+ equal(undefined, o.failed);
+ notEqual(undefined, o.sent);
+ }
+ }
+ if (e.incoming) {
+ equal(undefined, e.incoming.failed);
+ equal(undefined, e.incoming.newFailed);
+ notEqual(undefined, e.incoming.applied || e.incoming.reconciled);
+ }
+ }
+ });
+}
+
+// Hooks into telemetry to validate all pings after calling.
+function validate_all_future_pings() {
+ let telem = get_sync_test_telemetry();
+ telem.submit = assert_valid_ping;
+}
+
+function wait_for_pings(expectedPings) {
+ return new Promise(resolve => {
+ let telem = get_sync_test_telemetry();
+ let oldSubmit = telem.submit;
+ let pings = [];
+ telem.submit = function (record) {
+ pings.push(record);
+ if (pings.length == expectedPings) {
+ telem.submit = oldSubmit;
+ resolve(pings);
+ }
+ };
+ });
+}
+
+async function wait_for_ping(callback, allowErrorPings, getFullPing = false) {
+ let pingsPromise = wait_for_pings(1);
+ await callback();
+ let [record] = await pingsPromise;
+ if (allowErrorPings) {
+ assert_valid_ping(record);
+ } else {
+ assert_success_ping(record);
+ }
+ if (getFullPing) {
+ return record;
+ }
+ equal(record.syncs.length, 1);
+ return record.syncs[0];
+}
+
+// Perform a sync and validate all telemetry caused by the sync. If fnValidate
+// is null, we just check that the ping records success. If fnValidate is
+// specified, then the sync must have recorded just a single sync, and that
+// sync record will be passed to the function to be checked.
+async function sync_and_validate_telem(
+ fnValidate = null,
+ wantFullPing = false
+) {
+ let numErrors = 0;
+ let telem = get_sync_test_telemetry();
+ let oldSubmit = telem.submit;
+ try {
+ telem.submit = function (record) {
+ // This is called via an observer, so failures here don't cause the test
+ // to fail :(
+ try {
+ // All pings must be valid.
+ assert_valid_ping(record);
+ if (fnValidate) {
+ // for historical reasons most of these callbacks expect a "sync"
+ // record, not the entire ping.
+ if (wantFullPing) {
+ fnValidate(record);
+ } else {
+ Assert.equal(record.syncs.length, 1);
+ fnValidate(record.syncs[0]);
+ }
+ } else {
+ // no validation function means it must be a "success" ping.
+ assert_success_ping(record);
+ }
+ } catch (ex) {
+ print("Failure in ping validation callback", ex, "\n", ex.stack);
+ numErrors += 1;
+ }
+ };
+ await Service.sync();
+    Assert.equal(numErrors, 0, "There were telemetry validation errors");
+ } finally {
+ telem.submit = oldSubmit;
+ }
+}
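+
+// Example usage (illustrative; the assertion is just an example):
+//
+//   await sync_and_validate_telem(record => {
+//     Assert.ok(!record.failureReason);
+//   });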
+
+// Used for the (many) cases where we do a 'partial' sync, where only a single
+// engine is actually synced, but we still want to ensure we're generating a
+// valid ping. Returns a promise that resolves to the sync record (or the full
+// ping, if wantFullPing is true), or rejects with the thrown error after
+// calling an optional callback.
+async function sync_engine_and_validate_telem(
+ engine,
+ allowErrorPings,
+ onError,
+ wantFullPing = false
+) {
+ let telem = get_sync_test_telemetry();
+ let caughtError = null;
+ // Clear out status, so failures from previous syncs won't show up in the
+ // telemetry ping.
+ let { Status } = ChromeUtils.importESModule(
+ "resource://services-sync/status.sys.mjs"
+ );
+ Status._engines = {};
+ Status.partial = false;
+ // Ideally we'd clear these out like we do with engines, (probably via
+ // Status.resetSync()), but this causes *numerous* tests to fail, so we just
+ // assume that if no failureReason or engine failures are set, and the
+ // status properties are the same as they were initially, that it's just
+ // a leftover.
+ // This is only an issue since we're triggering the sync of just one engine,
+ // without doing any other parts of the sync.
+ let initialServiceStatus = Status._service;
+ let initialSyncStatus = Status._sync;
+
+ let oldSubmit = telem.submit;
+ let submitPromise = new Promise((resolve, reject) => {
+ telem.submit = function (ping) {
+ telem.submit = oldSubmit;
+ ping.syncs.forEach(record => {
+ if (record && record.status) {
+            // Did we see anything to lead us to believe that something bad
+            // actually happened?
+ let realProblem =
+ record.failureReason ||
+ record.engines.some(e => {
+ if (e.failureReason || e.status) {
+ return true;
+ }
+ if (e.outgoing && e.outgoing.some(o => o.failed > 0)) {
+ return true;
+ }
+ return e.incoming && e.incoming.failed;
+ });
+ if (!realProblem) {
+            // No, so if the status is the same as it was initially, just
+            // assume that it's a leftover and that we can ignore it.
+ if (record.status.sync && record.status.sync == initialSyncStatus) {
+ delete record.status.sync;
+ }
+ if (
+ record.status.service &&
+ record.status.service == initialServiceStatus
+ ) {
+ delete record.status.service;
+ }
+ if (!record.status.sync && !record.status.service) {
+ delete record.status;
+ }
+ }
+ }
+ });
+ if (allowErrorPings) {
+ assert_valid_ping(ping);
+ } else {
+ assert_success_ping(ping);
+ }
+ equal(ping.syncs.length, 1);
+ if (caughtError) {
+ if (onError) {
+ onError(ping.syncs[0], ping);
+ }
+ reject(caughtError);
+ } else if (wantFullPing) {
+ resolve(ping);
+ } else {
+ resolve(ping.syncs[0]);
+ }
+ };
+ });
+  // Neuter the scheduler, as it interacts badly with some of the tests - the
+  // engine being synced usually isn't the registered engine, so we see the
+  // score incremented but never reset, which schedules unexpected syncs.
+ let oldObserve = Service.scheduler.observe;
+ Service.scheduler.observe = () => {};
+ try {
+ Svc.Obs.notify("weave:service:sync:start");
+ try {
+ await engine.sync();
+ } catch (e) {
+ caughtError = e;
+ }
+ if (caughtError) {
+ Svc.Obs.notify("weave:service:sync:error", caughtError);
+ } else {
+ Svc.Obs.notify("weave:service:sync:finish");
+ }
+ } finally {
+ Service.scheduler.observe = oldObserve;
+ }
+ return submitPromise;
+}
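+
+// Example usage (illustrative): sync one engine and inspect its sync record;
+// pass `true` for allowErrorPings when a failure is expected.
+//
+//   let syncRecord = await sync_engine_and_validate_telem(engine, false);
+//   let engineRecord = syncRecord.engines.find(e => e.name == engine.name);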
+
+// Returns a promise that resolves once the specified observer notification
+// has fired.
+function promiseOneObserver(topic) {
+  return new Promise(resolve => {
+ let observer = function (subject, data) {
+ Svc.Obs.remove(topic, observer);
+ resolve({ subject, data });
+ };
+ Svc.Obs.add(topic, observer);
+ });
+}
+
+async function registerRotaryEngine() {
+ let { RotaryEngine } = ChromeUtils.importESModule(
+ "resource://testing-common/services/sync/rotaryengine.sys.mjs"
+ );
+ await Service.engineManager.clear();
+
+ await Service.engineManager.register(RotaryEngine);
+ let engine = Service.engineManager.get("rotary");
+ let syncID = await engine.resetLocalSyncID();
+ engine.enabled = true;
+
+ return { engine, syncID, tracker: engine._tracker };
+}
+
+// Set the validation prefs to attempt validation every time to avoid non-determinism.
+function enableValidationPrefs(engines = ["bookmarks"]) {
+ for (let engine of engines) {
+ Svc.PrefBranch.setIntPref(`engine.${engine}.validation.interval`, 0);
+ Svc.PrefBranch.setIntPref(
+ `engine.${engine}.validation.percentageChance`,
+ 100
+ );
+ Svc.PrefBranch.setIntPref(`engine.${engine}.validation.maxRecords`, -1);
+ Svc.PrefBranch.setBoolPref(`engine.${engine}.validation.enabled`, true);
+ }
+}
+
+async function serverForEnginesWithKeys(users, engines, callback) {
+ // Generate and store a fake default key bundle to avoid resetting the client
+ // before the first sync.
+ let wbo = await Service.collectionKeys.generateNewKeysWBO();
+ let modified = new_timestamp();
+ Service.collectionKeys.setContents(wbo.cleartext, modified);
+
+ let allEngines = [Service.clientsEngine].concat(engines);
+
+ let globalEngines = {};
+ for (let engine of allEngines) {
+ let syncID = await engine.resetLocalSyncID();
+ globalEngines[engine.name] = { version: engine.version, syncID };
+ }
+
+ let contents = {
+ meta: {
+ global: {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ engines: globalEngines,
+ },
+ },
+ crypto: {
+ keys: encryptPayload(wbo.cleartext),
+ },
+ };
+ for (let engine of allEngines) {
+ contents[engine.name] = {};
+ }
+
+ return serverForUsers(users, contents, callback);
+}
+
+async function serverForFoo(engine, callback) {
+  // The bookmarks engine *always* tracks changes, meaning we might try to
+  // sync due to the bookmarks we ourselves create! Worse, because we do an
+  // engine sync only, there's no locking - so we end up with multiple syncs
+  // running. Neuter that by making the threshold very large.
+ Service.scheduler.syncThreshold = 10000000;
+ return serverForEnginesWithKeys({ foo: "password" }, engine, callback);
+}
+
+// Places notifies history observers asynchronously, so `addVisits` might return
+// before the tracker receives the notification. This helper registers an
+// observer that resolves once the expected notification fires.
+async function promiseVisit(expectedType, expectedURI) {
+ return new Promise(resolve => {
+ function done(type, uri) {
+ if (uri == expectedURI.spec && type == expectedType) {
+ PlacesObservers.removeListener(
+ ["page-visited", "page-removed"],
+ observer.handlePlacesEvents
+ );
+ resolve();
+ }
+ }
+ let observer = {
+ handlePlacesEvents(events) {
+ Assert.equal(events.length, 1);
+
+ if (events[0].type === "page-visited") {
+ done("added", events[0].url);
+ } else if (events[0].type === "page-removed") {
+ Assert.ok(events[0].isRemovedFromStore);
+ done("removed", events[0].url);
+ }
+ },
+ };
+ PlacesObservers.addListener(
+ ["page-visited", "page-removed"],
+ observer.handlePlacesEvents
+ );
+ });
+}
+
+async function addVisit(
+ suffix,
+ referrer = null,
+ transition = PlacesUtils.history.TRANSITION_LINK
+) {
+ let uriString = "http://getfirefox.com/" + suffix;
+ let uri = CommonUtils.makeURI(uriString);
+ _("Adding visit for URI " + uriString);
+
+ let visitAddedPromise = promiseVisit("added", uri);
+ await PlacesTestUtils.addVisits({
+ uri,
+ visitDate: Date.now() * 1000,
+ transition,
+ referrer,
+ });
+ await visitAddedPromise;
+
+ return uri;
+}
+
+function bookmarkNodesToInfos(nodes) {
+ return nodes.map(node => {
+ let info = {
+ guid: node.guid,
+ index: node.index,
+ };
+ if (node.children) {
+ info.children = bookmarkNodesToInfos(node.children);
+ }
+ return info;
+ });
+}
+
+async function assertBookmarksTreeMatches(rootGuid, expected, message) {
+ let root = await PlacesUtils.promiseBookmarksTree(rootGuid, {
+ includeItemIds: true,
+ });
+ let actual = bookmarkNodesToInfos(root.children);
+
+ if (!ObjectUtils.deepEqual(actual, expected)) {
+ _(`Expected structure for ${rootGuid}`, JSON.stringify(expected));
+ _(`Actual structure for ${rootGuid}`, JSON.stringify(actual));
+ throw new Assert.constructor.AssertionError({ actual, expected, message });
+ }
+}
+
+function add_bookmark_test(task) {
+ const { BookmarksEngine } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/bookmarks.sys.mjs"
+ );
+
+ add_task(async function () {
+ _(`Running bookmarks test ${task.name}`);
+ let engine = new BookmarksEngine(Service);
+ await engine.initialize();
+ await engine._resetClient();
+ try {
+ await task(engine);
+ } finally {
+ await engine.finalize();
+ }
+ });
+}
diff --git a/services/sync/tests/unit/head_http_server.js b/services/sync/tests/unit/head_http_server.js
new file mode 100644
index 0000000000..84dbb33951
--- /dev/null
+++ b/services/sync/tests/unit/head_http_server.js
@@ -0,0 +1,1265 @@
+/* import-globals-from head_appinfo.js */
+/* import-globals-from ../../../common/tests/unit/head_helpers.js */
+/* import-globals-from head_helpers.js */
+
+var Cm = Components.manager;
+
+// Shared logging for all HTTP server functions.
+var { Log } = ChromeUtils.importESModule("resource://gre/modules/Log.sys.mjs");
+var { CommonUtils } = ChromeUtils.importESModule(
+ "resource://services-common/utils.sys.mjs"
+);
+var { TestUtils } = ChromeUtils.importESModule(
+ "resource://testing-common/TestUtils.sys.mjs"
+);
+var {
+ MockFxaStorageManager,
+ SyncTestingInfrastructure,
+ configureFxAccountIdentity,
+ configureIdentity,
+ encryptPayload,
+ getLoginTelemetryScalar,
+ makeFxAccountsInternalMock,
+ makeIdentityConfig,
+ promiseNamedTimer,
+ promiseZeroTimer,
+ sumHistogram,
+ syncTestLogging,
+ waitForZeroTimer,
+} = ChromeUtils.importESModule(
+ "resource://testing-common/services/sync/utils.sys.mjs"
+);
+
+const SYNC_HTTP_LOGGER = "Sync.Test.Server";
+
+// While the sync code itself uses 1.5, the tests hard-code 1.1,
+// so we're sticking with 1.1 here.
+const SYNC_API_VERSION = "1.1";
+
+// Use the same method that record.js does, which mirrors the server.
+// The server returns timestamps with 1/100 sec granularity. Note that this is
+// subject to change: see Bug 650435.
+function new_timestamp() {
+ return round_timestamp(Date.now());
+}
+
+// Rounds a millisecond timestamp `t` to seconds, with centisecond precision.
+function round_timestamp(t) {
+ return Math.round(t / 10) / 100;
+}
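+
+// For example, round_timestamp(1234567896789) - a timestamp in milliseconds -
+// yields 1234567896.79: whole seconds, with centiseconds preserved.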
+
+function return_timestamp(request, response, timestamp) {
+ if (!timestamp) {
+ timestamp = new_timestamp();
+ }
+ let body = "" + timestamp;
+ response.setHeader("X-Weave-Timestamp", body);
+ response.setStatusLine(request.httpVersion, 200, "OK");
+ writeBytesToOutputStream(response.bodyOutputStream, body);
+ return timestamp;
+}
+
+function has_hawk_header(req) {
+ return (
+ req.hasHeader("Authorization") &&
+ req.getHeader("Authorization").startsWith("Hawk")
+ );
+}
+
+function basic_auth_header(user, password) {
+ return "Basic " + btoa(user + ":" + CommonUtils.encodeUTF8(password));
+}
+
+function basic_auth_matches(req, user, password) {
+ if (!req.hasHeader("Authorization")) {
+ return false;
+ }
+
+ let expected = basic_auth_header(user, CommonUtils.encodeUTF8(password));
+ return req.getHeader("Authorization") == expected;
+}
+
+function httpd_basic_auth_handler(body, metadata, response) {
+ if (basic_auth_matches(metadata, "guest", "guest")) {
+ response.setStatusLine(metadata.httpVersion, 200, "OK, authorized");
+ response.setHeader("WWW-Authenticate", 'Basic realm="secret"', false);
+ } else {
+ body = "This path exists and is protected - failed";
+ response.setStatusLine(metadata.httpVersion, 401, "Unauthorized");
+ response.setHeader("WWW-Authenticate", 'Basic realm="secret"', false);
+ }
+ writeBytesToOutputStream(response.bodyOutputStream, body);
+}
+
+/*
+ * Represent a WBO on the server
+ */
+function ServerWBO(id, initialPayload, modified) {
+ if (!id) {
+ throw new Error("No ID for ServerWBO!");
+ }
+ this.id = id;
+ if (!initialPayload) {
+ return;
+ }
+
+ if (typeof initialPayload == "object") {
+ initialPayload = JSON.stringify(initialPayload);
+ }
+ this.payload = initialPayload;
+ this.modified = modified || new_timestamp();
+ this.sortindex = 0;
+}
+ServerWBO.prototype = {
+ get data() {
+ return JSON.parse(this.payload);
+ },
+
+ get() {
+ return { id: this.id, modified: this.modified, payload: this.payload };
+ },
+
+ put(input) {
+ input = JSON.parse(input);
+ this.payload = input.payload;
+ this.modified = new_timestamp();
+ this.sortindex = input.sortindex || 0;
+ },
+
+ delete() {
+ delete this.payload;
+ delete this.modified;
+ delete this.sortindex;
+ },
+
+  // This handler sets `newModified` on the response object if the WBO's
+  // modified time has changed. This allows wrapper handlers to extract
+  // information that otherwise would exist only in the body stream.
+ handler() {
+ let self = this;
+
+ return function (request, response) {
+ var statusCode = 200;
+ var status = "OK";
+ var body;
+
+ switch (request.method) {
+ case "GET":
+ if (self.payload) {
+ body = JSON.stringify(self.get());
+ } else {
+ statusCode = 404;
+ status = "Not Found";
+ body = "Not Found";
+ }
+ break;
+
+ case "PUT":
+ self.put(readBytesFromInputStream(request.bodyInputStream));
+ body = JSON.stringify(self.modified);
+ response.setHeader("Content-Type", "application/json");
+ response.newModified = self.modified;
+ break;
+
+ case "DELETE":
+ self.delete();
+ let ts = new_timestamp();
+ body = JSON.stringify(ts);
+ response.setHeader("Content-Type", "application/json");
+ response.newModified = ts;
+ break;
+ }
+ response.setHeader("X-Weave-Timestamp", "" + new_timestamp(), false);
+ response.setStatusLine(request.httpVersion, statusCode, status);
+ writeBytesToOutputStream(response.bodyOutputStream, body);
+ };
+ },
+
+ /**
+ * Get the cleartext data stored in the payload.
+ *
+ * This isn't `get cleartext`, because `x.cleartext.blah = 3;` wouldn't work,
+ * which seems like a footgun.
+ */
+ getCleartext() {
+ return JSON.parse(JSON.parse(this.payload).ciphertext);
+ },
+
+ /**
+ * Setter for getCleartext(), but lets you adjust the modified timestamp too.
+ * Returns this ServerWBO object.
+ */
+ setCleartext(cleartext, modifiedTimestamp = this.modified) {
+ this.payload = JSON.stringify(encryptPayload(cleartext));
+ this.modified = modifiedTimestamp;
+ return this;
+ },
+};
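+
+// Example usage (illustrative; the user and path are hypothetical):
+//
+//   let wbo = new ServerWBO("keys", { default: "fake-key" });
+//   httpd_setup({ "/1.1/johndoe/storage/crypto/keys": wbo.handler() });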
+
+/**
+ * Represent a collection on the server. The '_wbos' attribute is a
+ * mapping of id -> ServerWBO objects.
+ *
+ * Note that if you want these records to be accessible individually,
+ * you need to register their handlers with the server separately, or use a
+ * containing HTTP server that will do so on your behalf.
+ *
+ * @param wbos
+ * An object mapping WBO IDs to ServerWBOs.
+ * @param acceptNew
+ * If true, POSTs to this collection URI will result in new WBOs being
+ * created and wired in on the fly.
+ * @param timestamp
+ * An optional timestamp value to initialize the modified time of the
+ * collection. This should be in the format returned by new_timestamp().
+ *
+ * @return the new ServerCollection instance.
+ *
+ */
+function ServerCollection(wbos, acceptNew, timestamp) {
+ this._wbos = wbos || {};
+ this.acceptNew = acceptNew || false;
+
+ /*
+ * Track modified timestamp.
+ * We can't just use the timestamps of contained WBOs: an empty collection
+ * has a modified time.
+ */
+ this.timestamp = timestamp || new_timestamp();
+ this._log = Log.repository.getLogger(SYNC_HTTP_LOGGER);
+}
+ServerCollection.prototype = {
+ /**
+ * Convenience accessor for our WBO keys.
+ * Excludes deleted items, of course.
+ *
+ * @param filter
+ * A predicate function (applied to the ID and WBO) which dictates
+ * whether to include the WBO's ID in the output.
+ *
+ * @return an array of IDs.
+ */
+ keys: function keys(filter) {
+ let ids = [];
+ for (let [id, wbo] of Object.entries(this._wbos)) {
+ if (wbo.payload && (!filter || filter(id, wbo))) {
+ ids.push(id);
+ }
+ }
+ return ids;
+ },
+
+ /**
+ * Convenience method to get an array of WBOs.
+ * Optionally provide a filter function.
+ *
+ * @param filter
+ * A predicate function, applied to the WBO, which dictates whether to
+ * include the WBO in the output.
+ *
+ * @return an array of ServerWBOs.
+ */
+ wbos: function wbos(filter) {
+ let os = [];
+ for (let wbo of Object.values(this._wbos)) {
+ if (wbo.payload) {
+ os.push(wbo);
+ }
+ }
+
+ if (filter) {
+ return os.filter(filter);
+ }
+ return os;
+ },
+
+ /**
+ * Convenience method to get an array of parsed ciphertexts.
+ *
+ * @return an array of the payloads of each stored WBO.
+ */
+ payloads() {
+ return this.wbos().map(wbo => wbo.getCleartext());
+ },
+
+ // Just for syntactic elegance.
+ wbo: function wbo(id) {
+ return this._wbos[id];
+ },
+
+ payload: function payload(id) {
+ return this.wbo(id).payload;
+ },
+
+ cleartext(id) {
+ return this.wbo(id).getCleartext();
+ },
+
+ /**
+ * Insert the provided WBO under its ID.
+ *
+ * @return the provided WBO.
+ */
+ insertWBO: function insertWBO(wbo) {
+ this.timestamp = Math.max(this.timestamp, wbo.modified);
+ return (this._wbos[wbo.id] = wbo);
+ },
+
+ /**
+ * Update an existing WBO's cleartext using a callback function that modifies
+ * the record in place, or returns a new record.
+ */
+ updateRecord(id, updateCallback, optTimestamp) {
+ let wbo = this.wbo(id);
+ if (!wbo) {
+ throw new Error("No record with provided ID");
+ }
+ let curCleartext = wbo.getCleartext();
+ // Allow update callback to either return a new cleartext, or modify in place.
+ let newCleartext = updateCallback(curCleartext) || curCleartext;
+ wbo.setCleartext(newCleartext, optTimestamp);
+    // It is already inserted, but we might need to update our timestamp based
+    // on its `modified` value, if `optTimestamp` was provided.
+ return this.insertWBO(wbo);
+ },
+
+ /**
+   * Insert a record, which may be either an object with a cleartext property,
+   * or the cleartext itself.
+ */
+  insertRecord(record, timestamp = new_timestamp()) {
+ if (typeof timestamp != "number") {
+ throw new TypeError("insertRecord: Timestamp is not a number.");
+ }
+ if (!record.id) {
+ throw new Error("Attempt to insert record with no id");
+ }
+ // Allow providing either the cleartext directly, or the CryptoWrapper-like.
+ let cleartext = record.cleartext || record;
+ return this.insert(record.id, encryptPayload(cleartext), timestamp);
+ },
+
+ /**
+ * Insert the provided payload as part of a new ServerWBO with the provided
+ * ID.
+ *
+ * @param id
+ * The GUID for the WBO.
+ * @param payload
+ * The payload, as provided to the ServerWBO constructor.
+ * @param modified
+ * An optional modified time for the ServerWBO.
+ *
+ * @return the inserted WBO.
+ */
+ insert: function insert(id, payload, modified) {
+ return this.insertWBO(new ServerWBO(id, payload, modified));
+ },
+
+ /**
+ * Removes an object entirely from the collection.
+ *
+ * @param id
+ * (string) ID to remove.
+ */
+ remove: function remove(id) {
+ delete this._wbos[id];
+ },
+
+ _inResultSet(wbo, options) {
+ return (
+ wbo.payload &&
+ (!options.ids || options.ids.includes(wbo.id)) &&
+ (!options.newer || wbo.modified > options.newer) &&
+ (!options.older || wbo.modified < options.older)
+ );
+ },
+
+ count(options) {
+ options = options || {};
+ let c = 0;
+ for (let wbo of Object.values(this._wbos)) {
+ if (wbo.modified && this._inResultSet(wbo, options)) {
+ c++;
+ }
+ }
+ return c;
+ },
+
+ get(options, request) {
+ let data = [];
+ for (let wbo of Object.values(this._wbos)) {
+ if (wbo.modified && this._inResultSet(wbo, options)) {
+ data.push(wbo);
+ }
+ }
+ switch (options.sort) {
+ case "newest":
+ data.sort((a, b) => b.modified - a.modified);
+ break;
+
+ case "oldest":
+ data.sort((a, b) => a.modified - b.modified);
+ break;
+
+ case "index":
+ data.sort((a, b) => b.sortindex - a.sortindex);
+ break;
+
+ default:
+ if (options.sort) {
+ this._log.error(
+ "Error: client requesting unknown sort order",
+ options.sort
+ );
+ throw new Error("Unknown sort order");
+ }
+ // If the client didn't request a sort order, shuffle the records
+ // to ensure that we don't accidentally depend on the default order.
+ TestUtils.shuffle(data);
+ }
+ if (options.full) {
+ data = data.map(wbo => wbo.get());
+ let start = options.offset || 0;
+ if (options.limit) {
+ let numItemsPastOffset = data.length - start;
+ data = data.slice(start, start + options.limit);
+        // Use options as a backchannel to set X-Weave-Next-Offset.
+ if (numItemsPastOffset > options.limit) {
+ options.nextOffset = start + options.limit;
+ }
+ } else if (start) {
+ data = data.slice(start);
+ }
+
+ if (request && request.getHeader("accept") == "application/newlines") {
+ this._log.error(
+ "Error: client requesting application/newlines content"
+ );
+ throw new Error(
+ "This server should not serve application/newlines content"
+ );
+ }
+
+ // Use options as a backchannel to report count.
+ options.recordCount = data.length;
+ } else {
+ data = data.map(wbo => wbo.id);
+ let start = options.offset || 0;
+ if (options.limit) {
+ data = data.slice(start, start + options.limit);
+ options.nextOffset = start + options.limit;
+ } else if (start) {
+ data = data.slice(start);
+ }
+ options.recordCount = data.length;
+ }
+ return JSON.stringify(data);
+ },
+
+ post(input) {
+ input = JSON.parse(input);
+ let success = [];
+ let failed = {};
+
+    // Records for which we have an existing ServerWBO registered count as
+    // successful; all other records count as failed.
+ for (let key in input) {
+ let record = input[key];
+ let wbo = this.wbo(record.id);
+ if (!wbo && this.acceptNew) {
+ this._log.debug(
+ "Creating WBO " + JSON.stringify(record.id) + " on the fly."
+ );
+ wbo = new ServerWBO(record.id);
+ this.insertWBO(wbo);
+ }
+ if (wbo) {
+ wbo.payload = record.payload;
+ wbo.modified = new_timestamp();
+ wbo.sortindex = record.sortindex || 0;
+ success.push(record.id);
+ } else {
+ failed[record.id] = "no wbo configured";
+ }
+ }
+ return { modified: new_timestamp(), success, failed };
+ },
+
+ delete(options) {
+ let deleted = [];
+ for (let wbo of Object.values(this._wbos)) {
+ if (this._inResultSet(wbo, options)) {
+ this._log.debug("Deleting " + JSON.stringify(wbo));
+ deleted.push(wbo.id);
+ wbo.delete();
+ }
+ }
+ return deleted;
+ },
+
+  // This handler sets `newModified` on the response object if the collection
+  // timestamp has changed.
+ handler() {
+ let self = this;
+
+ return function (request, response) {
+ var statusCode = 200;
+ var status = "OK";
+ var body;
+
+ // Parse queryString
+ let options = {};
+ for (let chunk of request.queryString.split("&")) {
+ if (!chunk) {
+ continue;
+ }
+ chunk = chunk.split("=");
+ if (chunk.length == 1) {
+ options[chunk[0]] = "";
+ } else {
+ options[chunk[0]] = chunk[1];
+ }
+ }
+ // The real servers return 400 if ids= is specified without a list of IDs.
+ if (options.hasOwnProperty("ids")) {
+ if (!options.ids) {
+          response.setStatusLine(request.httpVersion, 400, "Bad Request");
+ body = "Bad Request";
+ writeBytesToOutputStream(response.bodyOutputStream, body);
+ return;
+ }
+ options.ids = options.ids.split(",");
+ }
+ if (options.newer) {
+ options.newer = parseFloat(options.newer);
+ }
+ if (options.older) {
+ options.older = parseFloat(options.older);
+ }
+ if (options.limit) {
+ options.limit = parseInt(options.limit, 10);
+ }
+ if (options.offset) {
+ options.offset = parseInt(options.offset, 10);
+ }
+
+ switch (request.method) {
+ case "GET":
+ body = self.get(options, request);
+ // see http://moz-services-docs.readthedocs.io/en/latest/storage/apis-1.5.html
+ // for description of these headers.
+ let { recordCount: records, nextOffset } = options;
+
+ self._log.info("Records: " + records + ", nextOffset: " + nextOffset);
+ if (records != null) {
+ response.setHeader("X-Weave-Records", "" + records);
+ }
+ if (nextOffset) {
+ response.setHeader("X-Weave-Next-Offset", "" + nextOffset);
+ }
+ response.setHeader("X-Last-Modified", "" + self.timestamp);
+ break;
+
+ case "POST":
+ let res = self.post(
+ readBytesFromInputStream(request.bodyInputStream),
+ request
+ );
+ body = JSON.stringify(res);
+ response.newModified = res.modified;
+ break;
+
+ case "DELETE":
+ self._log.debug("Invoking ServerCollection.DELETE.");
+ let deleted = self.delete(options, request);
+ let ts = new_timestamp();
+ body = JSON.stringify(ts);
+ response.newModified = ts;
+ response.deleted = deleted;
+ break;
+ }
+ response.setHeader("X-Weave-Timestamp", "" + new_timestamp(), false);
+
+ // Update the collection timestamp to the appropriate modified time.
+ // This is either a value set by the handler, or the current time.
+ if (request.method != "GET") {
+ self.timestamp =
+ response.newModified >= 0 ? response.newModified : new_timestamp();
+ }
+ response.setHeader("X-Last-Modified", "" + self.timestamp, false);
+
+ response.setStatusLine(request.httpVersion, statusCode, status);
+ writeBytesToOutputStream(response.bodyOutputStream, body);
+ };
+ },
+};
+
+/*
+ * Test setup helpers.
+ */
+function sync_httpd_setup(handlers) {
+ handlers["/1.1/foo/storage/meta/global"] = new ServerWBO(
+ "global",
+ {}
+ ).handler();
+ return httpd_setup(handlers);
+}
+
+/*
+ * Track collection modified times. Return closures.
+ *
+ * XXX - DO NOT USE IN NEW TESTS
+ *
+ * This code has very limited and very hacky timestamp support - the test
+ * server now has more complete and correct support - using this helper
+ * may cause strangeness wrt timestamp headers and 412 responses.
+ */
+function track_collections_helper() {
+ /*
+ * Our tracking object.
+ */
+ let collections = {};
+
+ /*
+ * Update the timestamp of a collection.
+ */
+ function update_collection(coll, ts) {
+ _("Updating collection " + coll + " to " + ts);
+ let timestamp = ts || new_timestamp();
+ collections[coll] = timestamp;
+ }
+
+ /*
+ * Invoke a handler, updating the collection's modified timestamp unless
+ * it's a GET request.
+ */
+ function with_updated_collection(coll, f) {
+ return function (request, response) {
+ f.call(this, request, response);
+
+ // Update the collection timestamp to the appropriate modified time.
+ // This is either a value set by the handler, or the current time.
+ if (request.method != "GET") {
+ update_collection(coll, response.newModified);
+ }
+ };
+ }
+
+ /*
+ * Return the info/collections object.
+ */
+ function info_collections(request, response) {
+ let body = "Error.";
+ switch (request.method) {
+ case "GET":
+ body = JSON.stringify(collections);
+ break;
+ default:
+ throw new Error("Non-GET on info_collections.");
+ }
+
+ response.setHeader("Content-Type", "application/json");
+ response.setHeader("X-Weave-Timestamp", "" + new_timestamp(), false);
+ response.setStatusLine(request.httpVersion, 200, "OK");
+ writeBytesToOutputStream(response.bodyOutputStream, body);
+ }
+
+ return {
+ collections,
+ handler: info_collections,
+ with_updated_collection,
+ update_collection,
+ };
+}
+
+// ===========================================================================//
+// httpd.js-based Sync server. //
+// ===========================================================================//
+
+/**
+ * In general, the preferred way of using SyncServer is to directly introspect
+ * it. Callbacks are available for operations which are hard to verify through
+ * introspection, such as deletions.
+ *
+ * One of the goals of this server is to provide enough hooks for test code to
+ * find out what it needs without monkeypatching. Use this object as your
+ * prototype, and override as appropriate.
+ */
+var SyncServerCallback = {
+ onCollectionDeleted: function onCollectionDeleted(user, collection) {},
+ onItemDeleted: function onItemDeleted(user, collection, wboID) {},
+
+ /**
+ * Called at the top of every request.
+ *
+   * Allows the test to inspect the request. Hooks should be careful not to
+   * modify the request or change its state, or they may impact future
+   * processing. The response is also passed so the callback can set headers
+   * etc. - but care must be taken not to touch the response body or headers
+   * in ways that conflict with the normal operation of this server.
+ */
+ onRequest: function onRequest(request, response) {},
+};
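+
+// Illustrative sketch: override only the hooks a test cares about. The
+// bookkeeping array here is hypothetical.
+//
+//   let deletedCollections = [];
+//   let callback = Object.create(SyncServerCallback);
+//   callback.onCollectionDeleted = (user, coll) => {
+//     deletedCollections.push(`${user}/${coll}`);
+//   };
+//   let server = new SyncServer(callback);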
+
+/**
+ * Construct a new test Sync server. Takes a callback object (e.g.,
+ * SyncServerCallback) as input.
+ */
+function SyncServer(callback) {
+ this.callback = callback || Object.create(SyncServerCallback);
+ this.server = new HttpServer();
+ this.started = false;
+ this.users = {};
+ this._log = Log.repository.getLogger(SYNC_HTTP_LOGGER);
+
+ // Install our own default handler. This allows us to mess around with the
+ // whole URL space.
+ let handler = this.server._handler;
+ handler._handleDefault = this.handleDefault.bind(this, handler);
+}
+SyncServer.prototype = {
+ server: null, // HttpServer.
+ users: null, // Map of username => {collections, password}.
+
+ /**
+ * Start the SyncServer's underlying HTTP server.
+ *
+ * @param port
+ * The numeric port on which to start. -1 implies the default, a
+ * randomly chosen port.
+ * @param cb
+ * A callback function (of no arguments) which is invoked after
+ * startup.
+ */
+ start: function start(port = -1, cb) {
+ if (this.started) {
+ this._log.warn("Warning: server already started on " + this.port);
+ return;
+ }
+ try {
+ this.server.start(port);
+ let i = this.server.identity;
+ this.port = i.primaryPort;
+ this.baseURI =
+ i.primaryScheme + "://" + i.primaryHost + ":" + i.primaryPort + "/";
+ this.started = true;
+ if (cb) {
+ cb();
+ }
+ } catch (ex) {
+ _("==========================================");
+ _("Got exception starting Sync HTTP server.");
+ _("Error: " + Log.exceptionStr(ex));
+ _("Is there a process already listening on port " + port + "?");
+ _("==========================================");
+ do_throw(ex);
+ }
+ },
+
+ /**
+ * Stop the SyncServer's HTTP server.
+ *
+ * @param cb
+ * A callback function. Invoked after the server has been stopped.
+ *
+ */
+ stop: function stop(cb) {
+ if (!this.started) {
+ this._log.warn(
+ "SyncServer: Warning: server not running. Can't stop me now!"
+ );
+ return;
+ }
+
+ this.server.stop(cb);
+ this.started = false;
+ },
+
+ /**
+ * Return a server timestamp for a record.
+ * The server returns timestamps with 1/100 sec granularity. Note that this is
+ * subject to change: see Bug 650435.
+ */
+ timestamp: function timestamp() {
+ return new_timestamp();
+ },
+
+ /**
+ * Create a new user, complete with an empty set of collections.
+ *
+ * @param username
+ * The username to use. An Error will be thrown if a user by that name
+ * already exists.
+ * @param password
+ * A password string.
+ *
+ * @return a user object, as would be returned by server.user(username).
+ */
+ registerUser: function registerUser(username, password) {
+ if (username in this.users) {
+ throw new Error("User already exists.");
+ }
+ this.users[username] = {
+ password,
+ collections: {},
+ };
+ return this.user(username);
+ },
+
+ userExists: function userExists(username) {
+ return username in this.users;
+ },
+
+ getCollection: function getCollection(username, collection) {
+ return this.users[username].collections[collection];
+ },
+
+ _insertCollection: function _insertCollection(collections, collection, wbos) {
+ let coll = new ServerCollection(wbos, true);
+ coll.collectionHandler = coll.handler();
+ collections[collection] = coll;
+ return coll;
+ },
+
+ createCollection: function createCollection(username, collection, wbos) {
+ if (!(username in this.users)) {
+ throw new Error("Unknown user.");
+ }
+ let collections = this.users[username].collections;
+ if (collection in collections) {
+ throw new Error("Collection already exists.");
+ }
+ return this._insertCollection(collections, collection, wbos);
+ },
+
+ /**
+ * Accept a map like the following:
+ * {
+ * meta: {global: {version: 1, ...}},
+ * crypto: {"keys": {}, foo: {bar: 2}},
+ * bookmarks: {}
+ * }
+ * to cause collections and WBOs to be created.
+ * If a collection already exists, no error is raised.
+ * If a WBO already exists, it will be updated to the new contents.
+ */
+ createContents: function createContents(username, collections) {
+ if (!(username in this.users)) {
+ throw new Error("Unknown user.");
+ }
+ let userCollections = this.users[username].collections;
+ for (let [id, contents] of Object.entries(collections)) {
+ let coll =
+ userCollections[id] || this._insertCollection(userCollections, id);
+ for (let [wboID, payload] of Object.entries(contents)) {
+ coll.insert(wboID, payload);
+ }
+ }
+ },
+
+ /**
+ * Insert a WBO in an existing collection.
+ */
+ insertWBO: function insertWBO(username, collection, wbo) {
+ if (!(username in this.users)) {
+ throw new Error("Unknown user.");
+ }
+ let userCollections = this.users[username].collections;
+ if (!(collection in userCollections)) {
+ throw new Error("Unknown collection.");
+ }
+ userCollections[collection].insertWBO(wbo);
+ return wbo;
+ },
+
+ /**
+ * Delete all of the collections for the named user.
+ *
+ * @param username
+ * The name of the affected user.
+ *
+ * @return a timestamp.
+ */
+ deleteCollections: function deleteCollections(username) {
+ if (!(username in this.users)) {
+ throw new Error("Unknown user.");
+ }
+ let userCollections = this.users[username].collections;
+ for (let name in userCollections) {
+ let coll = userCollections[name];
+ this._log.trace("Bulk deleting " + name + " for " + username + "...");
+ coll.delete({});
+ }
+ this.users[username].collections = {};
+ return this.timestamp();
+ },
+
+ /**
+ * Simple accessor to allow collective binding and abbreviation of a bunch of
+ * methods. Yay!
+ * Use like this:
+ *
+ * let u = server.user("john");
+ * u.collection("bookmarks").wbo("abcdefg").payload; // Etc.
+ *
+ * @return a proxy for the user data stored in this server.
+ */
+ user: function user(username) {
+ let collection = this.getCollection.bind(this, username);
+ let createCollection = this.createCollection.bind(this, username);
+ let createContents = this.createContents.bind(this, username);
+ let modified = function (collectionName) {
+ return collection(collectionName).timestamp;
+ };
+ let deleteCollections = this.deleteCollections.bind(this, username);
+ return {
+ collection,
+ createCollection,
+ createContents,
+ deleteCollections,
+ modified,
+ };
+ },
+
+ /*
+ * Regular expressions for splitting up Sync request paths.
+ * Sync URLs are of the form:
+ * /$apipath/$version/$user/$further
+ * where $further is usually:
+ * storage/$collection/$wbo
+ * or
+ * storage/$collection
+ * or
+ * info/$op
+ * We assume for the sake of simplicity that $apipath is empty.
+ *
+ * N.B., we don't follow any kind of username spec here, because as far as I
+ * can tell there isn't one. See Bug 689671. Instead we follow the Python
+ * server code.
+ *
+ * Path: [all, version, username, first, rest]
+ * Storage: [all, collection?, id?]
+ */
+ pathRE:
+ /^\/([0-9]+(?:\.[0-9]+)?)\/([-._a-zA-Z0-9]+)(?:\/([^\/]+)(?:\/(.+))?)?$/,
+ storageRE: /^([-_a-zA-Z0-9]+)(?:\/([-_a-zA-Z0-9]+)\/?)?$/,
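+
+  // For example, pathRE.exec("/1.1/johndoe/storage/bookmarks/abcdefg") captures
+  // ["1.1", "johndoe", "storage", "bookmarks/abcdefg"], and storageRE then
+  // splits "bookmarks/abcdefg" into the collection name and the WBO ID.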
+
+ defaultHeaders: {},
+
+ /**
+ * HTTP response utility.
+ */
+ respond: function respond(req, resp, code, status, body, headers) {
+ resp.setStatusLine(req.httpVersion, code, status);
+ if (!headers) {
+ headers = this.defaultHeaders;
+ }
+ for (let header in headers) {
+ let value = headers[header];
+ resp.setHeader(header, value);
+ }
+ resp.setHeader("X-Weave-Timestamp", "" + this.timestamp(), false);
+ writeBytesToOutputStream(resp.bodyOutputStream, body);
+ },
+
+ /**
+ * This is invoked by the HttpServer. `this` is bound to the SyncServer;
+ * `handler` is the HttpServer's handler.
+ *
+ * TODO: need to use the correct Sync API response codes and errors here.
+ * TODO: Basic Auth.
+ * TODO: check username in path against username in BasicAuth.
+ */
+ handleDefault: function handleDefault(handler, req, resp) {
+ try {
+ this._handleDefault(handler, req, resp);
+ } catch (e) {
+ if (e instanceof HttpError) {
+ this.respond(req, resp, e.code, e.description, "", {});
+ } else {
+ throw e;
+ }
+ }
+ },
+
+ _handleDefault: function _handleDefault(handler, req, resp) {
+ this._log.debug(
+ "SyncServer: Handling request: " + req.method + " " + req.path
+ );
+
+ if (this.callback.onRequest) {
+ this.callback.onRequest(req, resp);
+ }
+
+ let parts = this.pathRE.exec(req.path);
+ if (!parts) {
+ this._log.debug("SyncServer: Unexpected request: bad URL " + req.path);
+ throw HTTP_404;
+ }
+
+ let [, version, username, first, rest] = parts;
+    // Doing a float compare of the version allows us to pretend there was
+    // a node reassignment - e.g., we could reassign from "1.1/user/" to
+    // "1.10/user" - this server will then still accept requests with the new
+    // URL, while any code in Sync itself which compares URLs will see a
+    // different URL.
+ if (parseFloat(version) != parseFloat(SYNC_API_VERSION)) {
+ this._log.debug("SyncServer: Unknown version.");
+ throw HTTP_404;
+ }
+
+ if (!this.userExists(username)) {
+ this._log.debug("SyncServer: Unknown user.");
+ throw HTTP_401;
+ }
+
+ // Hand off to the appropriate handler for this path component.
+ if (first in this.toplevelHandlers) {
+ let newHandler = this.toplevelHandlers[first];
+ return newHandler.call(
+ this,
+ newHandler,
+ req,
+ resp,
+ version,
+ username,
+ rest
+ );
+ }
+ this._log.debug("SyncServer: Unknown top-level " + first);
+ throw HTTP_404;
+ },
+
+ /**
+ * Compute the object that is returned for an info/collections request.
+ */
+ infoCollections: function infoCollections(username) {
+ let responseObject = {};
+ let colls = this.users[username].collections;
+ for (let coll in colls) {
+ responseObject[coll] = colls[coll].timestamp;
+ }
+ this._log.trace(
+ "SyncServer: info/collections returning " + JSON.stringify(responseObject)
+ );
+ return responseObject;
+ },
+
+ /**
+ * Collection of the handler methods we use for top-level path components.
+ */
+ toplevelHandlers: {
+ storage: function handleStorage(
+ handler,
+ req,
+ resp,
+ version,
+ username,
+ rest
+ ) {
+ let respond = this.respond.bind(this, req, resp);
+ if (!rest || !rest.length) {
+ this._log.debug(
+ "SyncServer: top-level storage " + req.method + " request."
+ );
+
+ // TODO: verify if this is spec-compliant.
+ if (req.method != "DELETE") {
+ respond(405, "Method Not Allowed", "[]", { Allow: "DELETE" });
+ return undefined;
+ }
+
+ // Delete all collections and track the timestamp for the response.
+ let timestamp = this.user(username).deleteCollections();
+
+ // Return timestamp and OK for deletion.
+ respond(200, "OK", JSON.stringify(timestamp));
+ return undefined;
+ }
+
+ let match = this.storageRE.exec(rest);
+ if (!match) {
+ this._log.warn("SyncServer: Unknown storage operation " + rest);
+ throw HTTP_404;
+ }
+ let [, collection, wboID] = match;
+ let coll = this.getCollection(username, collection);
+
+ let checkXIUSFailure = () => {
+ if (req.hasHeader("x-if-unmodified-since")) {
+ let xius = parseFloat(req.getHeader("x-if-unmodified-since"));
+          // Sadly, the way our tests are set up, we often end up with an xius
+          // of zero (typically when syncing just one engine, so the date from
+          // info/collections isn't used) - so we allow that to work.
+ // Further, the Python server treats non-existing collections as
+ // having a timestamp of 0.
+ let collTimestamp = coll ? coll.timestamp : 0;
+ if (xius && xius < collTimestamp) {
+ this._log.info(
+ `x-if-unmodified-since mismatch - request wants ${xius} but our collection has ${collTimestamp}`
+ );
+ respond(412, "precondition failed", "precondition failed");
+ return true;
+ }
+ }
+ return false;
+ };
+
+ switch (req.method) {
+ case "GET": {
+ if (!coll) {
+ if (wboID) {
+ respond(404, "Not found", "Not found");
+ return undefined;
+ }
+          // *cries inside* - apparently the real sync server returned 200
+          // here for some time, then returned 404 for some time (bug 687299),
+          // and is now back to 200 (bug 963332).
+ respond(200, "OK", "[]");
+ return undefined;
+ }
+ if (!wboID) {
+ return coll.collectionHandler(req, resp);
+ }
+ let wbo = coll.wbo(wboID);
+ if (!wbo) {
+ respond(404, "Not found", "Not found");
+ return undefined;
+ }
+ return wbo.handler()(req, resp);
+ }
+ case "DELETE": {
+ if (!coll) {
+ respond(200, "OK", "{}");
+ return undefined;
+ }
+ if (checkXIUSFailure()) {
+ return undefined;
+ }
+ if (wboID) {
+ let wbo = coll.wbo(wboID);
+ if (wbo) {
+ wbo.delete();
+ this.callback.onItemDeleted(username, collection, wboID);
+ }
+ respond(200, "OK", "{}");
+ return undefined;
+ }
+ coll.collectionHandler(req, resp);
+
+ // Spot if this is a DELETE for some IDs, and don't blow away the
+ // whole collection!
+ //
+ // We already handled deleting the WBOs by invoking the deleted
+ // collection's handler. However, in the case of
+ //
+ // DELETE storage/foobar
+ //
+ // we also need to remove foobar from the collections map. This
+ // clause tries to differentiate the above request from
+ //
+ // DELETE storage/foobar?ids=foo,baz
+ //
+ // and do the right thing.
+ // TODO: less hacky method.
+            if (!req.queryString.includes("ids=")) {
+ // When you delete the entire collection, we drop it.
+ this._log.debug("Deleting entire collection.");
+ delete this.users[username].collections[collection];
+ this.callback.onCollectionDeleted(username, collection);
+ }
+
+ // Notify of item deletion.
+ let deleted = resp.deleted || [];
+ for (let i = 0; i < deleted.length; ++i) {
+ this.callback.onItemDeleted(username, collection, deleted[i]);
+ }
+ return undefined;
+ }
+ case "PUT":
+ // PUT and POST have slightly different XIUS semantics - for PUT,
+ // the check is against the item, whereas for POST it is against
+ // the collection. So first, a special-case for PUT.
+ if (req.hasHeader("x-if-unmodified-since")) {
+ let xius = parseFloat(req.getHeader("x-if-unmodified-since"));
+          // Treat an xius of zero as if it wasn't specified - this happens
+          // in some of our tests for a new collection.
+ if (xius > 0) {
+ let wbo = coll.wbo(wboID);
+ if (xius < wbo.modified) {
+ this._log.info(
+ `x-if-unmodified-since mismatch - request wants ${xius} but wbo has ${wbo.modified}`
+ );
+ respond(412, "precondition failed", "precondition failed");
+ return undefined;
+ }
+ wbo.handler()(req, resp);
+ coll.timestamp = resp.newModified;
+ return resp;
+ }
+ }
+ // fall through to post.
+ case "POST":
+ if (checkXIUSFailure()) {
+ return undefined;
+ }
+ if (!coll) {
+ coll = this.createCollection(username, collection);
+ }
+
+ if (wboID) {
+ let wbo = coll.wbo(wboID);
+ if (!wbo) {
+ this._log.trace(
+ "SyncServer: creating WBO " + collection + "/" + wboID
+ );
+ wbo = coll.insert(wboID);
+ }
+ // Rather than instantiate each WBO's handler function, do it once
+ // per request. They get hit far less often than do collections.
+ wbo.handler()(req, resp);
+ coll.timestamp = resp.newModified;
+ return resp;
+ }
+ return coll.collectionHandler(req, resp);
+ default:
+ throw new Error("Request method " + req.method + " not implemented.");
+ }
+ },
+
+ info: function handleInfo(handler, req, resp, version, username, rest) {
+ switch (rest) {
+ case "collections":
+ let body = JSON.stringify(this.infoCollections(username));
+ this.respond(req, resp, 200, "OK", body, {
+ "Content-Type": "application/json",
+ });
+ return;
+ case "collection_usage":
+ case "collection_counts":
+ case "quota":
+ // TODO: implement additional info methods.
+ this.respond(req, resp, 200, "OK", "TODO");
+ return;
+ default:
+ // TODO
+ this._log.warn("SyncServer: Unknown info operation " + rest);
+ throw HTTP_404;
+ }
+ },
+ },
+};
+
+/**
+ * Test helper.
+ */
+function serverForUsers(users, contents, callback) {
+ let server = new SyncServer(callback);
+ for (let [user, pass] of Object.entries(users)) {
+ server.registerUser(user, pass);
+ server.createContents(user, contents);
+ }
+ server.start();
+ return server;
+}
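+
+// Example usage (illustrative; the user name and contents are arbitrary, and
+// promiseStopServer comes from the shared common test helpers):
+//
+//   let server = serverForUsers(
+//     { foo: "password" },
+//     { meta: { global: {} }, crypto: {}, clients: {} }
+//   );
+//   registerCleanupFunction(() => promiseStopServer(server));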
diff --git a/services/sync/tests/unit/missing-sourceuri.json b/services/sync/tests/unit/missing-sourceuri.json
new file mode 100644
index 0000000000..dcd487726a
--- /dev/null
+++ b/services/sync/tests/unit/missing-sourceuri.json
@@ -0,0 +1,20 @@
+{
+ "next": null,
+ "results": [
+ {
+ "name": "Restartless Test Extension",
+ "type": "extension",
+ "guid": "missing-sourceuri@tests.mozilla.org",
+ "current_version": {
+ "version": "1.0",
+ "files": [
+ {
+ "platform": "all",
+ "size": 485
+ }
+ ]
+ },
+ "last_updated": "2011-09-05T20:42:09Z"
+ }
+ ]
+}
diff --git a/services/sync/tests/unit/missing-xpi-search.json b/services/sync/tests/unit/missing-xpi-search.json
new file mode 100644
index 0000000000..55f6432b29
--- /dev/null
+++ b/services/sync/tests/unit/missing-xpi-search.json
@@ -0,0 +1,21 @@
+{
+ "next": null,
+ "results": [
+ {
+ "name": "Non-Restartless Test Extension",
+ "type": "extension",
+ "guid": "missing-xpi@tests.mozilla.org",
+ "current_version": {
+ "version": "1.0",
+ "files": [
+ {
+ "platform": "all",
+ "size": 485,
+ "url": "http://127.0.0.1:8888/THIS_DOES_NOT_EXIST.xpi"
+ }
+ ]
+ },
+ "last_updated": "2011-09-05T20:42:09Z"
+ }
+ ]
+}
diff --git a/services/sync/tests/unit/prefs_test_prefs_store.js b/services/sync/tests/unit/prefs_test_prefs_store.js
new file mode 100644
index 0000000000..63851a6934
--- /dev/null
+++ b/services/sync/tests/unit/prefs_test_prefs_store.js
@@ -0,0 +1,47 @@
+// This is a "preferences" file used by test_prefs_store.js
+
+/* global pref, user_pref */
+
+// The prefs that control what should be synced.
+// Most of these are "default" prefs, so the value itself will not sync.
+pref("services.sync.prefs.sync.testing.int", true);
+pref("services.sync.prefs.sync.testing.string", true);
+pref("services.sync.prefs.sync.testing.bool", true);
+pref("services.sync.prefs.sync.testing.dont.change", true);
+// This is a default pref, but has the special "sync-seen" pref.
+pref("services.sync.prefs.sync.testing.seen", true);
+pref("services.sync.prefs.sync-seen.testing.seen", false);
+
+// This one is a user pref, so it *will* sync.
+user_pref("services.sync.prefs.sync.testing.turned.off", false);
+pref("services.sync.prefs.sync.testing.nonexistent", true);
+pref("services.sync.prefs.sync.testing.default", true);
+pref("services.sync.prefs.sync.testing.synced.url", true);
+// We shouldn't sync the URL, or the flag that says we should sync the pref
+// (otherwise some other client might overwrite our local value).
+user_pref("services.sync.prefs.sync.testing.unsynced.url", true);
+
+// The preference values - these are all user_prefs, otherwise their value
+// will not be synced.
+user_pref("testing.int", 123);
+user_pref("testing.string", "ohai");
+user_pref("testing.bool", true);
+user_pref("testing.dont.change", "Please don't change me.");
+user_pref("testing.turned.off", "I won't get synced.");
+user_pref("testing.not.turned.on", "I won't get synced either!");
+// Some url we don't want to sync
+user_pref(
+ "testing.unsynced.url",
+ "moz-extension://d5d31b00-b944-4afb-bd3d-d0326551a0ae"
+);
+user_pref("testing.synced.url", "https://www.example.com");
+
+// A pref that exists but still has the default value - will be synced with
+// null as the value.
+pref("testing.default", "I'm the default value");
+
+// A pref that has the default value - it will start syncing as soon as
+// we see a change, even if the change is to the default.
+pref("testing.seen", "the value");
+
+// A pref that shouldn't be synced
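
This fixture exercises the prefs engine's gating convention: a pref X is only
a candidate for syncing when the control pref services.sync.prefs.sync.X is
true. A minimal sketch of that gate (illustrative only, not the engine's
actual implementation):

    const CONTROL_PREFIX = "services.sync.prefs.sync.";

    function isSyncCandidate(name) {
      // Control prefs default to false when absent.
      return Services.prefs.getBoolPref(CONTROL_PREFIX + name, false);
    }

    // Per the fixture: isSyncCandidate("testing.int") is true, while
    // isSyncCandidate("testing.not.turned.on") is false (no control pref).
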
diff --git a/services/sync/tests/unit/rewrite-search.json b/services/sync/tests/unit/rewrite-search.json
new file mode 100644
index 0000000000..740b6f2c30
--- /dev/null
+++ b/services/sync/tests/unit/rewrite-search.json
@@ -0,0 +1,21 @@
+{
+ "next": null,
+ "results": [
+ {
+ "name": "Rewrite Test Extension",
+ "type": "extension",
+ "guid": "rewrite@tests.mozilla.org",
+ "current_version": {
+ "version": "1.0",
+ "files": [
+ {
+ "platform": "all",
+ "size": 485,
+ "url": "http://127.0.0.1:8888/require.xpi?src=api"
+ }
+ ]
+ },
+ "last_updated": "2011-09-05T20:42:09Z"
+ }
+ ]
+}
diff --git a/services/sync/tests/unit/sync_ping_schema.json b/services/sync/tests/unit/sync_ping_schema.json
new file mode 100644
index 0000000000..a9866e3550
--- /dev/null
+++ b/services/sync/tests/unit/sync_ping_schema.json
@@ -0,0 +1,262 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "description": "schema for Sync pings, documentation avaliable in toolkit/components/telemetry/docs/sync-ping.rst",
+ "type": "object",
+ "additionalProperties": false,
+ "required": ["version", "syncs", "why", "uid"],
+ "properties": {
+ "version": { "type": "integer", "minimum": 0 },
+ "os": { "type": "object" },
+ "discarded": { "type": "integer", "minimum": 1 },
+ "why": { "enum": ["shutdown", "schedule", "idchange"] },
+ "uid": {
+ "type": "string",
+ "pattern": "^[0-9a-f]{32}$"
+ },
+ "deviceID": {
+ "type": "string",
+ "pattern": "^[0-9a-f]{64}$"
+ },
+ "devices": {
+ "type": "array",
+ "items": { "$ref": "#/definitions/device" }
+ },
+ "sessionStartDate": { "type": "string" },
+ "syncs": {
+ "type": "array",
+ "minItems": 0,
+ "items": { "$ref": "#/definitions/payload" }
+ },
+ "syncNodeType": {
+ "type": "string"
+ },
+ "events": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#/definitions/event" }
+ },
+ "migrations": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#/definitions/migration" }
+ },
+ "histograms": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "object",
+ "properties": {
+ "min": { "type": "integer" },
+ "max": { "type": "integer" },
+ "histogram_type": { "type": "integer" },
+ "sum": { "type": "integer" },
+ "ranges": { "type": "array" },
+ "counts": { "type": "array" }
+ }
+ }
+ }
+ },
+ "definitions": {
+ "payload": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": ["when", "took"],
+ "properties": {
+ "didLogin": { "type": "boolean" },
+ "when": { "type": "integer" },
+ "status": {
+ "type": "object",
+ "anyOf": [{ "required": ["sync"] }, { "required": ["service"] }],
+ "additionalProperties": false,
+ "properties": {
+ "sync": { "type": "string" },
+ "service": { "type": "string" }
+ }
+ },
+ "why": { "type": "string" },
+ "took": { "type": "integer", "minimum": -1 },
+ "failureReason": { "$ref": "#/definitions/error" },
+ "engines": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#/definitions/engine" }
+ }
+ }
+ },
+ "device": {
+ "required": ["id"],
+ "additionalProperties": false,
+ "type": "object",
+ "properties": {
+ "id": { "type": "string", "pattern": "^[0-9a-f]{64}$" },
+ "os": { "type": "string" },
+ "version": { "type": "string" },
+ "type": { "type": "string" },
+ "syncID": { "type": "string", "pattern": "^[0-9a-f]{64}$" }
+ }
+ },
+ "engine": {
+ "required": ["name"],
+ "additionalProperties": false,
+ "properties": {
+ "failureReason": { "$ref": "#/definitions/error" },
+ "name": { "type": "string" },
+ "took": { "type": "integer", "minimum": 1 },
+ "status": { "type": "string" },
+ "incoming": {
+ "type": "object",
+ "additionalProperties": false,
+ "anyOf": [{ "required": ["applied"] }, { "required": ["failed"] }],
+ "properties": {
+ "applied": { "type": "integer", "minimum": 1 },
+ "failed": { "type": "integer", "minimum": 1 },
+ "failedReasons": {
+ "type": "array",
+ "minItems": 1,
+ "$ref": "#/definitions/namedCount"
+ }
+ }
+ },
+ "outgoing": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#/definitions/outgoingBatch" }
+ },
+ "steps": {
+ "type": "array",
+ "minItems": 1,
+ "$ref": "#/definitions/step"
+ },
+ "validation": {
+ "type": "object",
+ "additionalProperties": false,
+ "anyOf": [
+ { "required": ["checked"] },
+ { "required": ["failureReason"] }
+ ],
+ "properties": {
+ "checked": { "type": "integer", "minimum": 0 },
+ "failureReason": { "$ref": "#/definitions/error" },
+ "took": { "type": "integer" },
+ "version": { "type": "integer" },
+ "problems": {
+ "type": "array",
+ "minItems": 1,
+ "$ref": "#/definitions/namedCount"
+ }
+ }
+ }
+ }
+ },
+ "outgoingBatch": {
+ "type": "object",
+ "additionalProperties": false,
+ "anyOf": [{ "required": ["sent"] }, { "required": ["failed"] }],
+ "properties": {
+ "sent": { "type": "integer", "minimum": 1 },
+ "failed": { "type": "integer", "minimum": 1 },
+ "failedReasons": {
+ "type": "array",
+ "minItems": 1,
+ "$ref": "#/definitions/namedCount"
+ }
+ }
+ },
+ "event": {
+ "type": "array",
+ "minItems": 4,
+ "maxItems": 6
+ },
+ "migration": {
+ "oneOf": [{ "$ref": "#/definitions/webextMigration" }]
+ },
+ "webextMigration": {
+ "required": ["type"],
+ "properties": {
+ "type": { "enum": ["webext-storage"] },
+ "entries": { "type": "integer" },
+ "entriesSuccessful": { "type": "integer" },
+ "extensions": { "type": "integer" },
+ "extensionsSuccessful": { "type": "integer" },
+ "openFailure": { "type": "boolean" }
+ }
+ },
+ "error": {
+ "oneOf": [
+ { "$ref": "#/definitions/httpError" },
+ { "$ref": "#/definitions/nsError" },
+ { "$ref": "#/definitions/shutdownError" },
+ { "$ref": "#/definitions/authError" },
+ { "$ref": "#/definitions/otherError" },
+ { "$ref": "#/definitions/unexpectedError" },
+ { "$ref": "#/definitions/sqlError" }
+ ]
+ },
+ "httpError": {
+ "required": ["name", "code"],
+ "properties": {
+ "name": { "enum": ["httperror"] },
+ "code": { "type": "integer" }
+ }
+ },
+ "nsError": {
+ "required": ["name", "code"],
+ "properties": {
+ "name": { "enum": ["nserror"] },
+ "code": { "type": "integer" }
+ }
+ },
+ "shutdownError": {
+ "required": ["name"],
+ "properties": {
+ "name": { "enum": ["shutdownerror"] }
+ }
+ },
+ "authError": {
+ "required": ["name"],
+ "properties": {
+ "name": { "enum": ["autherror"] },
+ "from": { "enum": ["tokenserver", "fxaccounts", "hawkclient"] }
+ }
+ },
+ "otherError": {
+ "required": ["name"],
+ "properties": {
+ "name": { "enum": ["othererror"] },
+ "error": { "type": "string" }
+ }
+ },
+ "unexpectedError": {
+ "required": ["name"],
+ "properties": {
+ "name": { "enum": ["unexpectederror"] },
+ "error": { "type": "string" }
+ }
+ },
+ "sqlError": {
+ "required": ["name"],
+ "properties": {
+ "name": { "enum": ["sqlerror"] },
+ "code": { "type": "integer" }
+ }
+ },
+ "step": {
+ "required": ["name"],
+ "properties": {
+ "name": { "type": "string" },
+ "took": { "type": "integer", "minimum": 1 },
+ "counts": {
+ "type": "array",
+ "minItems": 1,
+ "$ref": "#/definitions/namedCount"
+ }
+ }
+ },
+ "namedCount": {
+ "required": ["name", "count"],
+ "properties": {
+ "name": { "type": "string" },
+ "count": { "type": "integer" }
+ }
+ }
+ }
+}
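
For reference when writing tests against this schema, here is a minimal ping
that satisfies the required top-level fields ("version", "syncs", "why",
"uid"); the literal values are made up:

    const minimalPing = {
      version: 1,
      why: "schedule", // one of "shutdown", "schedule", "idchange"
      uid: "0123456789abcdef0123456789abcdef", // 32 lowercase hex chars
      syncs: [
        // Each sync payload requires "when" and "took".
        { when: 1684000000000, took: 1234 },
      ],
    };
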
diff --git a/services/sync/tests/unit/systemaddon-search.json b/services/sync/tests/unit/systemaddon-search.json
new file mode 100644
index 0000000000..a812714918
--- /dev/null
+++ b/services/sync/tests/unit/systemaddon-search.json
@@ -0,0 +1,21 @@
+{
+ "next": null,
+ "results": [
+ {
+ "name": "System Add-on Test",
+ "type": "extension",
+ "guid": "system1@tests.mozilla.org",
+ "current_version": {
+ "version": "1.0",
+ "files": [
+ {
+ "platform": "all",
+ "size": 999,
+ "url": "http://127.0.0.1:8888/system.xpi"
+ }
+ ]
+ },
+ "last_updated": "2011-09-05T20:42:09Z"
+ }
+ ]
+}
diff --git a/services/sync/tests/unit/test_412.js b/services/sync/tests/unit/test_412.js
new file mode 100644
index 0000000000..de0a2c087e
--- /dev/null
+++ b/services/sync/tests/unit/test_412.js
@@ -0,0 +1,60 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { RotaryEngine } = ChromeUtils.importESModule(
+ "resource://testing-common/services/sync/rotaryengine.sys.mjs"
+);
+
+add_task(async function test_412_not_treated_as_failure() {
+ await Service.engineManager.register(RotaryEngine);
+ let engine = Service.engineManager.get("rotary");
+
+ let server = await serverForFoo(engine);
+
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+  // Add an item to the server so the first sync advances lastModified.
+ let collection = server.getCollection("foo", "rotary");
+ let payload = encryptPayload({
+ id: "existing",
+ something: "existing record",
+ });
+ collection.insert("existing", payload);
+
+ let promiseObserved = promiseOneObserver("weave:engine:sync:finish");
+ try {
+ // Do sync.
+ _("initial sync to initialize the world");
+ await Service.sync();
+
+ // create a new record that should be uploaded and arrange for our lastSync
+ // timestamp to be wrong so we get a 412.
+ engine._store.items = { new: "new record" };
+ await engine._tracker.addChangedID("new", 0);
+
+ let saw412 = false;
+ let _uploadOutgoing = engine._uploadOutgoing;
+ engine._uploadOutgoing = async () => {
+ let lastSync = await engine.getLastSync();
+ await engine.setLastSync(lastSync - 2);
+ try {
+ await _uploadOutgoing.call(engine);
+ } catch (ex) {
+ saw412 = ex.status == 412;
+ throw ex;
+ }
+ };
+ _("Second sync - expecting a 412");
+ await Service.sync();
+ await promiseObserved;
+ ok(saw412, "did see a 412 error");
+ // But service status should be OK as the 412 shouldn't be treated as an error.
+ equal(Service.status.service, STATUS_OK);
+ } finally {
+ await promiseStopServer(server);
+ }
+});
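
The 412 here is the storage server's concurrency check: the client sends an
X-If-Unmodified-Since header derived from its lastSync timestamp, and the
server refuses the write if the collection changed after that point. A
simplified sketch of that server-side decision (illustrative, not the test
server's actual code):

    function checkPrecondition(ifUnmodifiedSince, collectionLastModified) {
      // ifUnmodifiedSince: the client's X-If-Unmodified-Since value;
      // rewinding lastSync (as the test does) makes it too old.
      if (
        ifUnmodifiedSince != null &&
        collectionLastModified > ifUnmodifiedSince
      ) {
        return 412; // precondition failed: another write landed first
      }
      return 200;
    }
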
diff --git a/services/sync/tests/unit/test_addon_utils.js b/services/sync/tests/unit/test_addon_utils.js
new file mode 100644
index 0000000000..c039bee16c
--- /dev/null
+++ b/services/sync/tests/unit/test_addon_utils.js
@@ -0,0 +1,156 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+const { AddonUtils } = ChromeUtils.importESModule(
+ "resource://services-sync/addonutils.sys.mjs"
+);
+
+const HTTP_PORT = 8888;
+const SERVER_ADDRESS = "http://127.0.0.1:8888";
+
+Services.prefs.setStringPref(
+ "extensions.getAddons.get.url",
+ SERVER_ADDRESS + "/search/guid:%IDS%"
+);
+
+AddonTestUtils.init(this);
+AddonTestUtils.createAppInfo(
+ "xpcshell@tests.mozilla.org",
+ "XPCShell",
+ "1",
+ "1.9.2"
+);
+
+add_task(async function setup() {
+ await AddonTestUtils.promiseStartupManager();
+});
+
+function createAndStartHTTPServer(port = HTTP_PORT) {
+ try {
+ let server = new HttpServer();
+
+ server.registerFile(
+ "/search/guid:missing-sourceuri%40tests.mozilla.org",
+ do_get_file("missing-sourceuri.json")
+ );
+
+ server.registerFile(
+ "/search/guid:rewrite%40tests.mozilla.org",
+ do_get_file("rewrite-search.json")
+ );
+
+ server.start(port);
+
+ return server;
+ } catch (ex) {
+ _("Got exception starting HTTP server on port " + port);
+ _("Error: " + Log.exceptionStr(ex));
+ do_throw(ex);
+ }
+ return null; /* not hit, but keeps eslint happy! */
+}
+
+function run_test() {
+ syncTestLogging();
+
+ run_next_test();
+}
+
+add_task(async function test_handle_empty_source_uri() {
+ _("Ensure that search results without a sourceURI are properly ignored.");
+
+ let server = createAndStartHTTPServer();
+
+ const ID = "missing-sourceuri@tests.mozilla.org";
+
+ const result = await AddonUtils.installAddons([
+ { id: ID, requireSecureURI: false },
+ ]);
+
+ Assert.ok("installedIDs" in result);
+ Assert.equal(0, result.installedIDs.length);
+
+ Assert.ok("skipped" in result);
+ Assert.ok(result.skipped.includes(ID));
+
+ await promiseStopServer(server);
+});
+
+add_test(function test_ignore_untrusted_source_uris() {
+ _("Ensures that source URIs from insecure schemes are rejected.");
+
+ const bad = [
+ "http://example.com/foo.xpi",
+ "ftp://example.com/foo.xpi",
+ "silly://example.com/foo.xpi",
+ ];
+
+ const good = ["https://example.com/foo.xpi"];
+
+ for (let s of bad) {
+ let sourceURI = Services.io.newURI(s);
+ let addon = { sourceURI, name: "bad", id: "bad" };
+
+ let canInstall = AddonUtils.canInstallAddon(addon);
+ Assert.ok(!canInstall, "Correctly rejected a bad URL");
+ }
+
+ for (let s of good) {
+ let sourceURI = Services.io.newURI(s);
+ let addon = { sourceURI, name: "good", id: "good" };
+
+ let canInstall = AddonUtils.canInstallAddon(addon);
+ Assert.ok(canInstall, "Correctly accepted a good URL");
+ }
+ run_next_test();
+});
+
+add_task(async function test_source_uri_rewrite() {
+ _("Ensure that a 'src=api' query string is rewritten to 'src=sync'");
+
+ // This tests for conformance with bug 708134 so server-side metrics aren't
+ // skewed.
+
+ // We resort to monkeypatching because of the API design.
+ let oldFunction =
+ Object.getPrototypeOf(AddonUtils).installAddonFromSearchResult;
+
+ let installCalled = false;
+ Object.getPrototypeOf(AddonUtils).installAddonFromSearchResult =
+ async function testInstallAddon(addon, metadata) {
+ Assert.equal(
+ SERVER_ADDRESS + "/require.xpi?src=sync",
+ addon.sourceURI.spec
+ );
+
+ installCalled = true;
+
+ const install = await AddonUtils.getInstallFromSearchResult(addon);
+ Assert.equal(
+ SERVER_ADDRESS + "/require.xpi?src=sync",
+ install.sourceURI.spec
+ );
+ Assert.deepEqual(
+ install.installTelemetryInfo,
+ { source: "sync" },
+ "Got the expected installTelemetryInfo"
+ );
+
+ return { id: addon.id, addon, install };
+ };
+
+ let server = createAndStartHTTPServer();
+
+ let installOptions = {
+ id: "rewrite@tests.mozilla.org",
+ requireSecureURI: false,
+ };
+ await AddonUtils.installAddons([installOptions]);
+
+ Assert.ok(installCalled);
+ Object.getPrototypeOf(AddonUtils).installAddonFromSearchResult = oldFunction;
+
+ await promiseStopServer(server);
+});
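
The rewrite being asserted swaps a src=api query parameter for src=sync so
AMO attributes the install to Sync rather than to the API (bug 708134). An
illustrative version of that transformation (a sketch, not AddonUtils' actual
code):

    function rewriteSourceURI(spec) {
      const url = new URL(spec);
      if (url.searchParams.get("src") === "api") {
        url.searchParams.set("src", "sync");
      }
      return url.href;
    }

    // rewriteSourceURI("http://127.0.0.1:8888/require.xpi?src=api")
    //   -> "http://127.0.0.1:8888/require.xpi?src=sync"
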
diff --git a/services/sync/tests/unit/test_addons_engine.js b/services/sync/tests/unit/test_addons_engine.js
new file mode 100644
index 0000000000..d957ed8fd3
--- /dev/null
+++ b/services/sync/tests/unit/test_addons_engine.js
@@ -0,0 +1,277 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+const { AddonManager } = ChromeUtils.importESModule(
+ "resource://gre/modules/AddonManager.sys.mjs"
+);
+const { CHANGE_INSTALLED } = ChromeUtils.importESModule(
+ "resource://services-sync/addonsreconciler.sys.mjs"
+);
+const { AddonsEngine } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/addons.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+Services.prefs.setStringPref(
+ "extensions.getAddons.get.url",
+ "http://localhost:8888/search/guid:%IDS%"
+);
+Services.prefs.setBoolPref("extensions.install.requireSecureOrigin", false);
+
+let engine;
+let syncID;
+let reconciler;
+let tracker;
+
+AddonTestUtils.init(this);
+
+const ADDON_ID = "addon1@tests.mozilla.org";
+const XPI = AddonTestUtils.createTempWebExtensionFile({
+ manifest: {
+ name: "Test 1",
+ description: "Test Description",
+ browser_specific_settings: { gecko: { id: ADDON_ID } },
+ },
+});
+
+async function resetReconciler() {
+ reconciler._addons = {};
+ reconciler._changes = [];
+
+ await reconciler.saveState();
+
+ await tracker.clearChangedIDs();
+}
+
+add_task(async function setup() {
+ AddonTestUtils.createAppInfo(
+ "xpcshell@tests.mozilla.org",
+ "XPCShell",
+ "1",
+ "1.9.2"
+ );
+ AddonTestUtils.overrideCertDB();
+ await AddonTestUtils.promiseStartupManager();
+
+ await Service.engineManager.register(AddonsEngine);
+ engine = Service.engineManager.get("addons");
+ syncID = await engine.resetLocalSyncID();
+ reconciler = engine._reconciler;
+ tracker = engine._tracker;
+
+ reconciler.startListening();
+
+ // Don't flush to disk in the middle of an event listener!
+ // This causes test hangs on WinXP.
+ reconciler._shouldPersist = false;
+
+ await resetReconciler();
+});
+
+// This is a basic sanity test for the unit test itself. If this breaks, the
+// add-ons API likely changed upstream.
+add_task(async function test_addon_install() {
+ _("Ensure basic add-on APIs work as expected.");
+
+ let install = await AddonManager.getInstallForFile(XPI);
+ Assert.notEqual(install, null);
+ Assert.equal(install.type, "extension");
+ Assert.equal(install.name, "Test 1");
+
+ await resetReconciler();
+});
+
+add_task(async function test_find_dupe() {
+ _("Ensure the _findDupe() implementation is sane.");
+
+ // This gets invoked at the top of sync, which is bypassed by this
+ // test, so we do it manually.
+ await engine._refreshReconcilerState();
+
+ let addon = await installAddon(XPI, reconciler);
+
+ let record = {
+ id: Utils.makeGUID(),
+ addonID: ADDON_ID,
+ enabled: true,
+ applicationID: Services.appinfo.ID,
+ source: "amo",
+ };
+
+ let dupe = await engine._findDupe(record);
+ Assert.equal(addon.syncGUID, dupe);
+
+ record.id = addon.syncGUID;
+ dupe = await engine._findDupe(record);
+ Assert.equal(null, dupe);
+
+ await uninstallAddon(addon, reconciler);
+ await resetReconciler();
+});
+
+add_task(async function test_get_changed_ids() {
+ let timerPrecision = Services.prefs.getBoolPref(
+ "privacy.reduceTimerPrecision"
+ );
+ Services.prefs.setBoolPref("privacy.reduceTimerPrecision", false);
+
+ registerCleanupFunction(function () {
+ Services.prefs.setBoolPref("privacy.reduceTimerPrecision", timerPrecision);
+ });
+
+ _("Ensure getChangedIDs() has the appropriate behavior.");
+
+ _("Ensure getChangedIDs() returns an empty object by default.");
+ let changes = await engine.getChangedIDs();
+ Assert.equal("object", typeof changes);
+ Assert.equal(0, Object.keys(changes).length);
+
+ _("Ensure tracker changes are populated.");
+ let now = new Date();
+ let changeTime = now.getTime() / 1000;
+ let guid1 = Utils.makeGUID();
+ await tracker.addChangedID(guid1, changeTime);
+
+ changes = await engine.getChangedIDs();
+ Assert.equal("object", typeof changes);
+ Assert.equal(1, Object.keys(changes).length);
+ Assert.ok(guid1 in changes);
+ Assert.equal(changeTime, changes[guid1]);
+
+ await tracker.clearChangedIDs();
+
+ _("Ensure reconciler changes are populated.");
+ let addon = await installAddon(XPI, reconciler);
+ await tracker.clearChangedIDs(); // Just in case.
+ changes = await engine.getChangedIDs();
+ Assert.equal("object", typeof changes);
+ Assert.equal(1, Object.keys(changes).length);
+ Assert.ok(addon.syncGUID in changes);
+ _(
+ "Change time: " + changeTime + ", addon change: " + changes[addon.syncGUID]
+ );
+ Assert.ok(changes[addon.syncGUID] >= changeTime);
+
+ let oldTime = changes[addon.syncGUID];
+ let guid2 = addon.syncGUID;
+ await uninstallAddon(addon, reconciler);
+ changes = await engine.getChangedIDs();
+ Assert.equal(1, Object.keys(changes).length);
+ Assert.ok(guid2 in changes);
+ Assert.ok(changes[guid2] > oldTime);
+
+ _("Ensure non-syncable add-ons aren't picked up by reconciler changes.");
+ reconciler._addons = {};
+ reconciler._changes = [];
+ let record = {
+ id: "DUMMY",
+ guid: Utils.makeGUID(),
+ enabled: true,
+ installed: true,
+ modified: new Date(),
+ type: "UNSUPPORTED",
+ scope: 0,
+ foreignInstall: false,
+ };
+ reconciler.addons.DUMMY = record;
+ await reconciler._addChange(record.modified, CHANGE_INSTALLED, record);
+
+ changes = await engine.getChangedIDs();
+ _(JSON.stringify(changes));
+ Assert.equal(0, Object.keys(changes).length);
+
+ await resetReconciler();
+});
+
+add_task(async function test_disabled_install_semantics() {
+ _("Ensure that syncing a disabled add-on preserves proper state.");
+
+  // This is essentially a test for bug 712542, which snuck into the original
+  // add-on sync drop. It ensures that when an add-on is installed, the
+  // disabled state and incoming syncGUID are preserved, even on the next sync.
+ const USER = "foo";
+ const PASSWORD = "password";
+
+ let server = new SyncServer();
+ server.start();
+ await SyncTestingInfrastructure(server, USER, PASSWORD);
+
+ await generateNewKeys(Service.collectionKeys);
+
+ let contents = {
+ meta: {
+ global: { engines: { addons: { version: engine.version, syncID } } },
+ },
+ crypto: {},
+ addons: {},
+ };
+
+ server.registerUser(USER, "password");
+ server.createContents(USER, contents);
+
+ let amoServer = new HttpServer();
+ amoServer.registerFile(
+ "/search/guid:addon1%40tests.mozilla.org",
+ do_get_file("addon1-search.json")
+ );
+
+ amoServer.registerFile("/addon1.xpi", XPI);
+ amoServer.start(8888);
+
+ // Insert an existing record into the server.
+ let id = Utils.makeGUID();
+ let now = Date.now() / 1000;
+
+ let record = encryptPayload({
+ id,
+ applicationID: Services.appinfo.ID,
+ addonID: ADDON_ID,
+ enabled: false,
+ deleted: false,
+ source: "amo",
+ });
+ let wbo = new ServerWBO(id, record, now - 2);
+ server.insertWBO(USER, "addons", wbo);
+
+ _("Performing sync of add-ons engine.");
+ await engine._sync();
+
+  // At this point the extension should be staged for install.
+
+ // Don't need this server any more.
+ await promiseStopServer(amoServer);
+
+ // We ensure the reconciler has recorded the proper ID and enabled state.
+ let addon = reconciler.getAddonStateFromSyncGUID(id);
+ Assert.notEqual(null, addon);
+ Assert.equal(false, addon.enabled);
+
+ // We fake an app restart and perform another sync, just to make sure things
+ // are sane.
+ await AddonTestUtils.promiseRestartManager();
+
+ let collection = server.getCollection(USER, "addons");
+ engine.lastModified = collection.timestamp;
+ await engine._sync();
+
+ // The client should not upload a new record. The old record should be
+ // retained and unmodified.
+ Assert.equal(1, collection.count());
+
+ let payload = collection.payloads()[0];
+ Assert.notEqual(null, collection.wbo(id));
+ Assert.equal(ADDON_ID, payload.addonID);
+ Assert.ok(!payload.enabled);
+
+ await promiseStopServer(server);
+});
+
+add_test(function cleanup() {
+ // There's an xpcom-shutdown hook for this, but let's give this a shot.
+ reconciler.stopListening();
+ run_next_test();
+});
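
test_get_changed_ids above depends on getChangedIDs() merging two sources:
explicit tracker entries and the reconciler's change log. A sketch of that
merge under assumed shapes (the tracker maps GUID to seconds; reconciler
changes are [Date, CHANGE_*, addonID] tuples, as the reconciler tests below
show); this is not the engine's actual code:

    async function mergedChangedIDs(tracker, reconcilerChanges, guidFor) {
      const changes = await tracker.getChangedIDs(); // { guid: seconds }
      for (const [date, _type, addonID] of reconcilerChanges) {
        const guid = guidFor(addonID); // resolve add-on ID to sync GUID
        if (guid && !(guid in changes)) {
          changes[guid] = date.getTime() / 1000;
        }
      }
      return changes;
    }
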
diff --git a/services/sync/tests/unit/test_addons_reconciler.js b/services/sync/tests/unit/test_addons_reconciler.js
new file mode 100644
index 0000000000..c72b18b00e
--- /dev/null
+++ b/services/sync/tests/unit/test_addons_reconciler.js
@@ -0,0 +1,209 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+const { AddonsReconciler, CHANGE_INSTALLED, CHANGE_UNINSTALLED } =
+ ChromeUtils.importESModule(
+ "resource://services-sync/addonsreconciler.sys.mjs"
+ );
+const { AddonsEngine } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/addons.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+AddonTestUtils.init(this);
+AddonTestUtils.createAppInfo(
+ "xpcshell@tests.mozilla.org",
+ "XPCShell",
+ "1",
+ "1.9.2"
+);
+AddonTestUtils.overrideCertDB();
+
+const ADDON_ID = "addon1@tests.mozilla.org";
+const XPI = AddonTestUtils.createTempWebExtensionFile({
+ manifest: {
+ name: "Test 1",
+ description: "Test Description",
+ browser_specific_settings: { gecko: { id: ADDON_ID } },
+ },
+});
+
+function makeAddonsReconciler() {
+ const log = Service.engineManager.get("addons")._log;
+ const queueCaller = Async.asyncQueueCaller(log);
+ return new AddonsReconciler(queueCaller);
+}
+
+add_task(async function setup() {
+ await AddonTestUtils.promiseStartupManager();
+ Svc.PrefBranch.setBoolPref("engine.addons", true);
+ await Service.engineManager.register(AddonsEngine);
+});
+
+add_task(async function test_defaults() {
+ _("Ensure new objects have reasonable defaults.");
+
+ let reconciler = makeAddonsReconciler();
+ await reconciler.ensureStateLoaded();
+
+ Assert.ok(!reconciler._listening);
+ Assert.equal("object", typeof reconciler.addons);
+ Assert.equal(0, Object.keys(reconciler.addons).length);
+ Assert.equal(0, reconciler._changes.length);
+ Assert.equal(0, reconciler._listeners.length);
+});
+
+add_task(async function test_load_state_empty_file() {
+ _("Ensure loading from a missing file results in defaults being set.");
+
+ let reconciler = makeAddonsReconciler();
+ await reconciler.ensureStateLoaded();
+
+ let loaded = await reconciler.loadState();
+ Assert.ok(!loaded);
+
+ Assert.equal("object", typeof reconciler.addons);
+ Assert.equal(0, Object.keys(reconciler.addons).length);
+ Assert.equal(0, reconciler._changes.length);
+});
+
+add_task(async function test_install_detection() {
+ _("Ensure that add-on installation results in appropriate side-effects.");
+
+ let reconciler = makeAddonsReconciler();
+ await reconciler.ensureStateLoaded();
+ reconciler.startListening();
+
+ let before = new Date();
+ let addon = await installAddon(XPI);
+ let after = new Date();
+
+ Assert.equal(1, Object.keys(reconciler.addons).length);
+ Assert.ok(addon.id in reconciler.addons);
+ let record = reconciler.addons[ADDON_ID];
+
+ const KEYS = [
+ "id",
+ "guid",
+ "enabled",
+ "installed",
+ "modified",
+ "type",
+ "scope",
+ "foreignInstall",
+ ];
+ for (let key of KEYS) {
+ Assert.ok(key in record);
+ Assert.notEqual(null, record[key]);
+ }
+
+ Assert.equal(addon.id, record.id);
+ Assert.equal(addon.syncGUID, record.guid);
+ Assert.ok(record.enabled);
+ Assert.ok(record.installed);
+ Assert.ok(record.modified >= before && record.modified <= after);
+ Assert.equal("extension", record.type);
+ Assert.ok(!record.foreignInstall);
+
+ Assert.equal(1, reconciler._changes.length);
+ let change = reconciler._changes[0];
+  Assert.ok(change[0] >= before && change[0] <= after);
+ Assert.equal(CHANGE_INSTALLED, change[1]);
+ Assert.equal(addon.id, change[2]);
+
+ await uninstallAddon(addon);
+});
+
+add_task(async function test_uninstall_detection() {
+ _("Ensure that add-on uninstallation results in appropriate side-effects.");
+
+ let reconciler = makeAddonsReconciler();
+ await reconciler.ensureStateLoaded();
+ reconciler.startListening();
+
+ reconciler._addons = {};
+ reconciler._changes = [];
+
+ let addon = await installAddon(XPI);
+ let id = addon.id;
+
+ reconciler._changes = [];
+ await uninstallAddon(addon, reconciler);
+
+ Assert.equal(1, Object.keys(reconciler.addons).length);
+ Assert.ok(id in reconciler.addons);
+
+ let record = reconciler.addons[id];
+ Assert.ok(!record.installed);
+
+ Assert.equal(1, reconciler._changes.length);
+ let change = reconciler._changes[0];
+ Assert.equal(CHANGE_UNINSTALLED, change[1]);
+ Assert.equal(id, change[2]);
+});
+
+add_task(async function test_load_state_future_version() {
+ _("Ensure loading a file from a future version results in no data loaded.");
+
+ const FILENAME = "TEST_LOAD_STATE_FUTURE_VERSION";
+
+ let reconciler = makeAddonsReconciler();
+ await reconciler.ensureStateLoaded();
+
+ // First we populate our new file.
+ let state = { version: 100, addons: { foo: {} }, changes: [[1, 1, "foo"]] };
+
+ // jsonSave() expects an object with ._log, so we give it a reconciler
+ // instance.
+ await Utils.jsonSave(FILENAME, reconciler, state);
+
+ let loaded = await reconciler.loadState(FILENAME);
+ Assert.ok(!loaded);
+
+ Assert.equal("object", typeof reconciler.addons);
+ Assert.equal(0, Object.keys(reconciler.addons).length);
+ Assert.equal(0, reconciler._changes.length);
+});
+
+add_task(async function test_prune_changes_before_date() {
+ _("Ensure that old changes are pruned properly.");
+
+ let reconciler = makeAddonsReconciler();
+ await reconciler.ensureStateLoaded();
+ reconciler._changes = [];
+
+ let now = new Date();
+ const HOUR_MS = 1000 * 60 * 60;
+
+ _("Ensure pruning an empty changes array works.");
+ reconciler.pruneChangesBeforeDate(now);
+ Assert.equal(0, reconciler._changes.length);
+
+ let old = new Date(now.getTime() - HOUR_MS);
+ let young = new Date(now.getTime() - 1000);
+ reconciler._changes.push([old, CHANGE_INSTALLED, "foo"]);
+ reconciler._changes.push([young, CHANGE_INSTALLED, "bar"]);
+ Assert.equal(2, reconciler._changes.length);
+
+ _("Ensure pruning with an old time won't delete anything.");
+ let threshold = new Date(old.getTime() - 1);
+ reconciler.pruneChangesBeforeDate(threshold);
+ Assert.equal(2, reconciler._changes.length);
+
+ _("Ensure pruning a single item works.");
+ threshold = new Date(young.getTime() - 1000);
+ reconciler.pruneChangesBeforeDate(threshold);
+ Assert.equal(1, reconciler._changes.length);
+ Assert.notEqual(undefined, reconciler._changes[0]);
+ Assert.equal(young, reconciler._changes[0][0]);
+ Assert.equal("bar", reconciler._changes[0][2]);
+
+ _("Ensure pruning all changes works.");
+ reconciler._changes.push([old, CHANGE_INSTALLED, "foo"]);
+ reconciler.pruneChangesBeforeDate(now);
+ Assert.equal(0, reconciler._changes.length);
+});
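
The pruning contract pinned down by the last test, as a standalone sketch
(the real method in addonsreconciler.sys.mjs mutates this._changes in place;
this version returns the kept entries):

    // Each change tuple is [Date, CHANGE_* constant, addonID].
    function pruneChangesBeforeDate(changes, date) {
      return changes.filter(([when]) => when >= date);
    }

    // pruneChangesBeforeDate(changes, now) with only older entries present
    // yields [], matching the final assertion above.
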
diff --git a/services/sync/tests/unit/test_addons_store.js b/services/sync/tests/unit/test_addons_store.js
new file mode 100644
index 0000000000..a0a3ac6c69
--- /dev/null
+++ b/services/sync/tests/unit/test_addons_store.js
@@ -0,0 +1,750 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+const { AddonsEngine } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/addons.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { FileUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/FileUtils.sys.mjs"
+);
+const { SyncedRecordsTelemetry } = ChromeUtils.importESModule(
+ "resource://services-sync/telemetry.sys.mjs"
+);
+
+const HTTP_PORT = 8888;
+
+Services.prefs.setStringPref(
+ "extensions.getAddons.get.url",
+ "http://localhost:8888/search/guid:%IDS%"
+);
+// Note that all compat-override URLs currently 404, but that's OK - the main
+// thing is to avoid us hitting the real AMO.
+Services.prefs.setStringPref(
+ "extensions.getAddons.compatOverides.url",
+ "http://localhost:8888/compat-override/guid:%IDS%"
+);
+Services.prefs.setBoolPref("extensions.install.requireSecureOrigin", false);
+Services.prefs.setBoolPref("extensions.checkUpdateSecurity", false);
+
+AddonTestUtils.init(this);
+AddonTestUtils.createAppInfo(
+ "xpcshell@tests.mozilla.org",
+ "XPCShell",
+ "1",
+ "1.9.2"
+);
+AddonTestUtils.overrideCertDB();
+
+Services.prefs.setBoolPref("extensions.experiments.enabled", true);
+
+const SYSTEM_ADDON_ID = "system1@tests.mozilla.org";
+add_task(async function setupSystemAddon() {
+ const distroDir = FileUtils.getDir("ProfD", ["sysfeatures", "app0"]);
+ distroDir.create(Ci.nsIFile.DIRECTORY_TYPE, FileUtils.PERMS_DIRECTORY);
+ AddonTestUtils.registerDirectory("XREAppFeat", distroDir);
+
+ let xpi = await AddonTestUtils.createTempWebExtensionFile({
+ manifest: {
+ browser_specific_settings: { gecko: { id: SYSTEM_ADDON_ID } },
+ },
+ });
+
+ xpi.copyTo(distroDir, `${SYSTEM_ADDON_ID}.xpi`);
+
+ await AddonTestUtils.overrideBuiltIns({ system: [SYSTEM_ADDON_ID] });
+ await AddonTestUtils.promiseStartupManager();
+});
+
+const ID1 = "addon1@tests.mozilla.org";
+const ID2 = "addon2@tests.mozilla.org";
+const ID3 = "addon3@tests.mozilla.org";
+
+const ADDONS = {
+ test_addon1: {
+ manifest: {
+ browser_specific_settings: {
+ gecko: {
+ id: ID1,
+ update_url: "http://example.com/data/test_install.json",
+ },
+ },
+ },
+ },
+
+ test_addon2: {
+ manifest: {
+ browser_specific_settings: { gecko: { id: ID2 } },
+ },
+ },
+
+ test_addon3: {
+ manifest: {
+ browser_specific_settings: {
+ gecko: {
+ id: ID3,
+ strict_max_version: "0",
+ },
+ },
+ },
+ },
+};
+
+const SEARCH_RESULT = {
+ next: null,
+ results: [
+ {
+ name: "Test Extension",
+ type: "extension",
+ guid: "addon1@tests.mozilla.org",
+ current_version: {
+ version: "1.0",
+ files: [
+ {
+ platform: "all",
+ size: 485,
+ url: "http://localhost:8888/addon1.xpi",
+ },
+ ],
+ },
+ last_updated: "2018-10-27T04:12:00.826Z",
+ },
+ ],
+};
+
+const MISSING_SEARCH_RESULT = {
+ next: null,
+ results: [
+ {
+ name: "Test",
+ type: "extension",
+ guid: "missing-xpi@tests.mozilla.org",
+ current_version: {
+ version: "1.0",
+ files: [
+ {
+ platform: "all",
+ size: 123,
+ url: "http://localhost:8888/THIS_DOES_NOT_EXIST.xpi",
+ },
+ ],
+ },
+ },
+ ],
+};
+
+const XPIS = {};
+for (let [name, files] of Object.entries(ADDONS)) {
+ XPIS[name] = AddonTestUtils.createTempWebExtensionFile(files);
+}
+
+let engine;
+let store;
+let reconciler;
+
+const proxyService = Cc[
+ "@mozilla.org/network/protocol-proxy-service;1"
+].getService(Ci.nsIProtocolProxyService);
+
+const proxyFilter = {
+ proxyInfo: proxyService.newProxyInfo(
+ "http",
+ "localhost",
+ HTTP_PORT,
+ "",
+ "",
+ 0,
+ 4096,
+ null
+ ),
+
+ applyFilter(channel, defaultProxyInfo, callback) {
+ if (channel.URI.host === "example.com") {
+ callback.onProxyFilterResult(this.proxyInfo);
+ } else {
+ callback.onProxyFilterResult(defaultProxyInfo);
+ }
+ },
+};
+
+proxyService.registerChannelFilter(proxyFilter, 0);
+registerCleanupFunction(() => {
+ proxyService.unregisterChannelFilter(proxyFilter);
+});
+
+/**
+ * Create an add-on record for this application with the fields specified.
+ *
+ * @param id Sync GUID of record
+ * @param addonId ID of add-on
+ * @param enabled Boolean whether record is enabled
+ * @param deleted Boolean whether record was deleted
+ */
+function createRecordForThisApp(id, addonId, enabled, deleted) {
+ return {
+ id,
+ addonID: addonId,
+ enabled,
+ deleted: !!deleted,
+ applicationID: Services.appinfo.ID,
+ source: "amo",
+ };
+}
+
+function createAndStartHTTPServer(port) {
+ try {
+ let server = new HttpServer();
+
+ server.registerPathHandler(
+ "/search/guid:addon1%40tests.mozilla.org",
+ (req, resp) => {
+ resp.setHeader("Content-type", "application/json", true);
+ resp.write(JSON.stringify(SEARCH_RESULT));
+ }
+ );
+ server.registerPathHandler(
+ "/search/guid:missing-xpi%40tests.mozilla.org",
+ (req, resp) => {
+ resp.setHeader("Content-type", "application/json", true);
+ resp.write(JSON.stringify(MISSING_SEARCH_RESULT));
+ }
+ );
+ server.registerFile("/addon1.xpi", XPIS.test_addon1);
+
+ server.start(port);
+
+ return server;
+ } catch (ex) {
+ _("Got exception starting HTTP server on port " + port);
+ _("Error: " + Log.exceptionStr(ex));
+ do_throw(ex);
+ }
+ return null; /* not hit, but keeps eslint happy! */
+}
+
+// A helper function to ensure that the reconciler's current view of the addon
+// is the same as the addon itself. If it's not, then the reconciler missed a
+// change, and is likely to re-upload the addon next sync because of the change
+// it missed.
+async function checkReconcilerUpToDate(addon) {
+ let stateBefore = Object.assign({}, store.reconciler.addons[addon.id]);
+ await store.reconciler.rectifyStateFromAddon(addon);
+ let stateAfter = store.reconciler.addons[addon.id];
+ deepEqual(stateBefore, stateAfter);
+}
+
+add_task(async function setup() {
+ await Service.engineManager.register(AddonsEngine);
+ engine = Service.engineManager.get("addons");
+ store = engine._store;
+ reconciler = engine._reconciler;
+
+ reconciler.startListening();
+
+ // Don't flush to disk in the middle of an event listener!
+ // This causes test hangs on WinXP.
+ reconciler._shouldPersist = false;
+});
+
+add_task(async function test_remove() {
+ _("Ensure removing add-ons from deleted records works.");
+
+ let addon = await installAddon(XPIS.test_addon1, reconciler);
+ let record = createRecordForThisApp(addon.syncGUID, ID1, true, true);
+ let countTelemetry = new SyncedRecordsTelemetry();
+ let failed = await store.applyIncomingBatch([record], countTelemetry);
+ Assert.equal(0, failed.length);
+ Assert.equal(null, countTelemetry.failedReasons);
+ Assert.equal(0, countTelemetry.incomingCounts.failed);
+
+ let newAddon = await AddonManager.getAddonByID(ID1);
+ Assert.equal(null, newAddon);
+});
+
+add_task(async function test_apply_enabled() {
+ let countTelemetry = new SyncedRecordsTelemetry();
+ _("Ensures that changes to the userEnabled flag apply.");
+
+ let addon = await installAddon(XPIS.test_addon1, reconciler);
+ Assert.ok(addon.isActive);
+ Assert.ok(!addon.userDisabled);
+
+ _("Ensure application of a disable record works as expected.");
+ let records = [];
+ records.push(createRecordForThisApp(addon.syncGUID, ID1, false, false));
+
+ let [failed] = await Promise.all([
+ store.applyIncomingBatch(records, countTelemetry),
+ AddonTestUtils.promiseAddonEvent("onDisabled"),
+ ]);
+ Assert.equal(0, failed.length);
+ Assert.equal(0, countTelemetry.incomingCounts.failed);
+ addon = await AddonManager.getAddonByID(ID1);
+ Assert.ok(addon.userDisabled);
+ await checkReconcilerUpToDate(addon);
+ records = [];
+
+ _("Ensure enable record works as expected.");
+ records.push(createRecordForThisApp(addon.syncGUID, ID1, true, false));
+ [failed] = await Promise.all([
+ store.applyIncomingBatch(records, countTelemetry),
+ AddonTestUtils.promiseWebExtensionStartup(ID1),
+ ]);
+ Assert.equal(0, failed.length);
+ Assert.equal(0, countTelemetry.incomingCounts.failed);
+ addon = await AddonManager.getAddonByID(ID1);
+ Assert.ok(!addon.userDisabled);
+ await checkReconcilerUpToDate(addon);
+ records = [];
+
+ _("Ensure enabled state updates don't apply if the ignore pref is set.");
+ records.push(createRecordForThisApp(addon.syncGUID, ID1, false, false));
+ Svc.PrefBranch.setBoolPref("addons.ignoreUserEnabledChanges", true);
+ failed = await store.applyIncomingBatch(records, countTelemetry);
+ Assert.equal(0, failed.length);
+ Assert.equal(0, countTelemetry.incomingCounts.failed);
+ addon = await AddonManager.getAddonByID(ID1);
+ Assert.ok(!addon.userDisabled);
+ records = [];
+
+ await uninstallAddon(addon, reconciler);
+ Svc.PrefBranch.clearUserPref("addons.ignoreUserEnabledChanges");
+});
+
+add_task(async function test_apply_enabled_appDisabled() {
+ _(
+ "Ensures that changes to the userEnabled flag apply when the addon is appDisabled."
+ );
+
+ // this addon is appDisabled by default.
+ let addon = await installAddon(XPIS.test_addon3);
+ Assert.ok(addon.appDisabled);
+ Assert.ok(!addon.isActive);
+ Assert.ok(!addon.userDisabled);
+
+ _("Ensure application of a disable record works as expected.");
+ store.reconciler.pruneChangesBeforeDate(Date.now() + 10);
+ store.reconciler._changes = [];
+ let records = [];
+ let countTelemetry = new SyncedRecordsTelemetry();
+ records.push(createRecordForThisApp(addon.syncGUID, ID3, false, false));
+ let failed = await store.applyIncomingBatch(records, countTelemetry);
+ Assert.equal(0, failed.length);
+ Assert.equal(0, countTelemetry.incomingCounts.failed);
+ addon = await AddonManager.getAddonByID(ID3);
+ Assert.ok(addon.userDisabled);
+ await checkReconcilerUpToDate(addon);
+ records = [];
+
+ _("Ensure enable record works as expected.");
+ records.push(createRecordForThisApp(addon.syncGUID, ID3, true, false));
+ failed = await store.applyIncomingBatch(records, countTelemetry);
+ Assert.equal(0, failed.length);
+ Assert.equal(0, countTelemetry.incomingCounts.failed);
+ addon = await AddonManager.getAddonByID(ID3);
+ Assert.ok(!addon.userDisabled);
+ await checkReconcilerUpToDate(addon);
+ records = [];
+
+ await uninstallAddon(addon, reconciler);
+});
+
+add_task(async function test_ignore_different_appid() {
+ _(
+ "Ensure that incoming records with a different application ID are ignored."
+ );
+
+ // We test by creating a record that should result in an update.
+ let addon = await installAddon(XPIS.test_addon1, reconciler);
+ Assert.ok(!addon.userDisabled);
+
+ let record = createRecordForThisApp(addon.syncGUID, ID1, false, false);
+ record.applicationID = "FAKE_ID";
+ let countTelemetry = new SyncedRecordsTelemetry();
+ let failed = await store.applyIncomingBatch([record], countTelemetry);
+ Assert.equal(0, failed.length);
+
+ let newAddon = await AddonManager.getAddonByID(ID1);
+ Assert.ok(!newAddon.userDisabled);
+
+ await uninstallAddon(addon, reconciler);
+});
+
+add_task(async function test_ignore_unknown_source() {
+ _("Ensure incoming records with unknown source are ignored.");
+
+ let addon = await installAddon(XPIS.test_addon1, reconciler);
+
+ let record = createRecordForThisApp(addon.syncGUID, ID1, false, false);
+ record.source = "DUMMY_SOURCE";
+ let countTelemetry = new SyncedRecordsTelemetry();
+ let failed = await store.applyIncomingBatch([record], countTelemetry);
+ Assert.equal(0, failed.length);
+
+ let newAddon = await AddonManager.getAddonByID(ID1);
+ Assert.ok(!newAddon.userDisabled);
+
+ await uninstallAddon(addon, reconciler);
+});
+
+add_task(async function test_apply_uninstall() {
+ _("Ensures that uninstalling an add-on from a record works.");
+
+ let addon = await installAddon(XPIS.test_addon1, reconciler);
+
+ let records = [];
+ let countTelemetry = new SyncedRecordsTelemetry();
+ records.push(createRecordForThisApp(addon.syncGUID, ID1, true, true));
+ let failed = await store.applyIncomingBatch(records, countTelemetry);
+ Assert.equal(0, failed.length);
+ Assert.equal(0, countTelemetry.incomingCounts.failed);
+
+ addon = await AddonManager.getAddonByID(ID1);
+ Assert.equal(null, addon);
+});
+
+add_task(async function test_addon_syncability() {
+ _("Ensure isAddonSyncable functions properly.");
+
+ Svc.PrefBranch.setStringPref(
+ "addons.trustedSourceHostnames",
+ "addons.mozilla.org,other.example.com"
+ );
+
+ Assert.ok(!(await store.isAddonSyncable(null)));
+
+ let addon = await installAddon(XPIS.test_addon1, reconciler);
+ Assert.ok(await store.isAddonSyncable(addon));
+
+ let dummy = {};
+ const KEYS = [
+ "id",
+ "syncGUID",
+ "type",
+ "scope",
+ "foreignInstall",
+ "isSyncable",
+ ];
+ for (let k of KEYS) {
+ dummy[k] = addon[k];
+ }
+
+ Assert.ok(await store.isAddonSyncable(dummy));
+
+ dummy.type = "UNSUPPORTED";
+ Assert.ok(!(await store.isAddonSyncable(dummy)));
+ dummy.type = addon.type;
+
+ dummy.scope = 0;
+ Assert.ok(!(await store.isAddonSyncable(dummy)));
+ dummy.scope = addon.scope;
+
+ dummy.isSyncable = false;
+ Assert.ok(!(await store.isAddonSyncable(dummy)));
+ dummy.isSyncable = addon.isSyncable;
+
+ dummy.foreignInstall = true;
+ Assert.ok(!(await store.isAddonSyncable(dummy)));
+ dummy.foreignInstall = false;
+
+ await uninstallAddon(addon, reconciler);
+
+ Assert.ok(!store.isSourceURITrusted(null));
+
+ let trusted = [
+ "https://addons.mozilla.org/foo",
+ "https://other.example.com/foo",
+ ];
+
+ let untrusted = [
+ "http://addons.mozilla.org/foo", // non-https
+ "ftps://addons.mozilla.org/foo", // non-https
+ "https://untrusted.example.com/foo", // non-trusted hostname`
+ ];
+
+ for (let uri of trusted) {
+ Assert.ok(store.isSourceURITrusted(Services.io.newURI(uri)));
+ }
+
+ for (let uri of untrusted) {
+ Assert.ok(!store.isSourceURITrusted(Services.io.newURI(uri)));
+ }
+
+ Svc.PrefBranch.setStringPref("addons.trustedSourceHostnames", "");
+ for (let uri of trusted) {
+ Assert.ok(!store.isSourceURITrusted(Services.io.newURI(uri)));
+ }
+
+ Svc.PrefBranch.setStringPref(
+ "addons.trustedSourceHostnames",
+ "addons.mozilla.org"
+ );
+ Assert.ok(
+ store.isSourceURITrusted(
+ Services.io.newURI("https://addons.mozilla.org/foo")
+ )
+ );
+
+ Svc.PrefBranch.clearUserPref("addons.trustedSourceHostnames");
+});
+
+add_task(async function test_get_all_ids() {
+ _("Ensures that getAllIDs() returns an appropriate set.");
+
+ _("Installing two addons.");
+ // XXX - this test seems broken - at this point, before we've installed the
+ // addons below, store.getAllIDs() returns all addons installed by previous
+ // tests, even though those tests uninstalled the addon.
+ // So if any tests above ever add a new addon ID, they are going to need to
+ // be added here too.
+ // Assert.equal(0, Object.keys(store.getAllIDs()).length);
+ let addon1 = await installAddon(XPIS.test_addon1, reconciler);
+ let addon2 = await installAddon(XPIS.test_addon2, reconciler);
+ let addon3 = await installAddon(XPIS.test_addon3, reconciler);
+
+ _("Ensure they're syncable.");
+ Assert.ok(await store.isAddonSyncable(addon1));
+ Assert.ok(await store.isAddonSyncable(addon2));
+ Assert.ok(await store.isAddonSyncable(addon3));
+
+ let ids = await store.getAllIDs();
+
+ Assert.equal("object", typeof ids);
+ Assert.equal(3, Object.keys(ids).length);
+ Assert.ok(addon1.syncGUID in ids);
+ Assert.ok(addon2.syncGUID in ids);
+ Assert.ok(addon3.syncGUID in ids);
+
+ await uninstallAddon(addon1, reconciler);
+ await uninstallAddon(addon2, reconciler);
+ await uninstallAddon(addon3, reconciler);
+});
+
+add_task(async function test_change_item_id() {
+ _("Ensures that changeItemID() works properly.");
+
+ let addon = await installAddon(XPIS.test_addon1, reconciler);
+
+ let oldID = addon.syncGUID;
+ let newID = Utils.makeGUID();
+
+ await store.changeItemID(oldID, newID);
+
+ let newAddon = await AddonManager.getAddonByID(ID1);
+ Assert.notEqual(null, newAddon);
+ Assert.equal(newID, newAddon.syncGUID);
+
+ await uninstallAddon(newAddon, reconciler);
+});
+
+add_task(async function test_create() {
+ _("Ensure creating/installing an add-on from a record works.");
+
+ let server = createAndStartHTTPServer(HTTP_PORT);
+
+ let guid = Utils.makeGUID();
+ let record = createRecordForThisApp(guid, ID1, true, false);
+ let countTelemetry = new SyncedRecordsTelemetry();
+ let failed = await store.applyIncomingBatch([record], countTelemetry);
+ Assert.equal(0, failed.length);
+
+ let newAddon = await AddonManager.getAddonByID(ID1);
+ Assert.notEqual(null, newAddon);
+ Assert.equal(guid, newAddon.syncGUID);
+ Assert.ok(!newAddon.userDisabled);
+
+ await uninstallAddon(newAddon, reconciler);
+
+ await promiseStopServer(server);
+});
+
+add_task(async function test_create_missing_search() {
+ _("Ensures that failed add-on searches are handled gracefully.");
+
+ let server = createAndStartHTTPServer(HTTP_PORT);
+
+ // The handler for this ID is not installed, so a search should 404.
+ const id = "missing@tests.mozilla.org";
+ let guid = Utils.makeGUID();
+ let record = createRecordForThisApp(guid, id, true, false);
+ let countTelemetry = new SyncedRecordsTelemetry();
+ let failed = await store.applyIncomingBatch([record], countTelemetry);
+ Assert.equal(1, failed.length);
+ Assert.equal(guid, failed[0]);
+ Assert.equal(
+ countTelemetry.incomingCounts.failedReasons[0].name,
+ "GET <URL> failed (status 404)"
+ );
+ Assert.equal(countTelemetry.incomingCounts.failedReasons[0].count, 1);
+
+ let addon = await AddonManager.getAddonByID(id);
+ Assert.equal(null, addon);
+
+ await promiseStopServer(server);
+});
+
+add_task(async function test_create_bad_install() {
+ _("Ensures that add-ons without a valid install are handled gracefully.");
+
+ let server = createAndStartHTTPServer(HTTP_PORT);
+
+ // The handler returns a search result but the XPI will 404.
+ const id = "missing-xpi@tests.mozilla.org";
+ let guid = Utils.makeGUID();
+ let record = createRecordForThisApp(guid, id, true, false);
+ let countTelemetry = new SyncedRecordsTelemetry();
+ /* let failed = */ await store.applyIncomingBatch([record], countTelemetry);
+ // This addon had no source URI so was skipped - but it's not treated as
+ // failure.
+ // XXX - this test isn't testing what we thought it was. Previously the addon
+ // was not being installed due to requireSecureURL checking *before* we'd
+ // attempted to get the XPI.
+ // With requireSecureURL disabled we do see a download failure, but the addon
+ // *does* get added to |failed|.
+ // FTR: onDownloadFailed() is called with ERROR_NETWORK_FAILURE, so it's going
+ // to be tricky to distinguish a 404 from other transient network errors
+ // where we do want the addon to end up in |failed|.
+ // This is being tracked in bug 1284778.
+ // Assert.equal(0, failed.length);
+
+ let addon = await AddonManager.getAddonByID(id);
+ Assert.equal(null, addon);
+
+ await promiseStopServer(server);
+});
+
+add_task(async function test_ignore_system() {
+ _("Ensure we ignore system addons");
+ // Our system addon should not appear in getAllIDs
+ await engine._refreshReconcilerState();
+ let num = 0;
+ let ids = await store.getAllIDs();
+ for (let guid in ids) {
+ num += 1;
+ let addon = reconciler.getAddonStateFromSyncGUID(guid);
+ Assert.notEqual(addon.id, SYSTEM_ADDON_ID);
+ }
+  Assert.greater(num, 1, "should have seen more than one add-on.");
+});
+
+add_task(async function test_incoming_system() {
+ _("Ensure we handle incoming records that refer to a system addon");
+  // e.g., Loop initially shipped as a normal add-on but was then "promoted"
+  // to a system add-on while keeping the same ID, so a server record for it
+  // still exists.
+
+  // Before we start, ensure the system addon isn't disabled.
+  Assert.ok(!(await AddonManager.getAddonByID(SYSTEM_ADDON_ID)).userDisabled);
+
+ // Now simulate an incoming record with the same ID as the system addon,
+ // but flagged as disabled - it should not be applied.
+ let server = createAndStartHTTPServer(HTTP_PORT);
+ // We make the incoming record flag the system addon as disabled - it should
+ // be ignored.
+ let guid = Utils.makeGUID();
+ let record = createRecordForThisApp(guid, SYSTEM_ADDON_ID, false, false);
+ let countTelemetry = new SyncedRecordsTelemetry();
+ let failed = await store.applyIncomingBatch([record], countTelemetry);
+ Assert.equal(0, failed.length);
+
+ // The system addon should still not be userDisabled.
+  Assert.ok(!(await AddonManager.getAddonByID(SYSTEM_ADDON_ID)).userDisabled);
+
+ await promiseStopServer(server);
+});
+
+add_task(async function test_wipe() {
+ _("Ensures that wiping causes add-ons to be uninstalled.");
+
+ await installAddon(XPIS.test_addon1, reconciler);
+
+ await store.wipe();
+
+ let addon = await AddonManager.getAddonByID(ID1);
+ Assert.equal(null, addon);
+});
+
+add_task(async function test_wipe_and_install() {
+ _("Ensure wipe followed by install works.");
+
+ // This tests the reset sync flow where remote data is replaced by local. The
+ // receiving client will see a wipe followed by a record which should undo
+ // the wipe.
+ let installed = await installAddon(XPIS.test_addon1, reconciler);
+
+ let record = createRecordForThisApp(installed.syncGUID, ID1, true, false);
+
+ await store.wipe();
+
+ let deleted = await AddonManager.getAddonByID(ID1);
+ Assert.equal(null, deleted);
+
+ // Re-applying the record can require re-fetching the XPI.
+ let server = createAndStartHTTPServer(HTTP_PORT);
+
+ await store.applyIncoming(record);
+
+ let fetched = await AddonManager.getAddonByID(record.addonID);
+ Assert.ok(!!fetched);
+
+  // Wipe again so we are left with a clean slate.
+ await store.wipe();
+
+ await promiseStopServer(server);
+});
+
+// STR for what this is testing:
+// * Either:
+// * Install then remove an addon, then delete addons.json from the profile
+// or corrupt it (in which case the addon manager will remove it)
+// * Install then remove an addon while addon caching is disabled, then
+// re-enable addon caching.
+// * Install the same addon in a different profile, sync it.
+// * Sync this profile
+// Before bug 1467904, the addon would fail to install because this profile
+// has a copy of the addon in our addonsreconciler.json, but the addon manager
+// does *not* have a copy in its cache, and repopulating that cache would not
+// re-add it as the addon is no longer installed locally.
+add_task(async function test_incoming_reconciled_but_not_cached() {
+ _(
+ "Ensure we handle incoming records our reconciler has but the addon cache does not"
+ );
+
+ // Make sure addon is not installed.
+ let addon = await AddonManager.getAddonByID(ID1);
+ Assert.equal(null, addon);
+
+ Services.prefs.setBoolPref("extensions.getAddons.cache.enabled", false);
+
+ addon = await installAddon(XPIS.test_addon1, reconciler);
+ Assert.notEqual(await AddonManager.getAddonByID(ID1), null);
+ await uninstallAddon(addon, reconciler);
+
+ Services.prefs.setBoolPref("extensions.getAddons.cache.enabled", true);
+
+ // now pretend it is incoming.
+ let server = createAndStartHTTPServer(HTTP_PORT);
+ let guid = Utils.makeGUID();
+ let record = createRecordForThisApp(guid, ID1, true, false);
+ let countTelemetry = new SyncedRecordsTelemetry();
+ let failed = await store.applyIncomingBatch([record], countTelemetry);
+ Assert.equal(0, failed.length);
+
+ Assert.notEqual(await AddonManager.getAddonByID(ID1), null);
+
+ await promiseStopServer(server);
+});
+
+// NOTE: The test above must be the last test run due to the addon cache
+// being trashed. It is probably possible to fix that by running, e.g.,
+// AddonRepository.backgroundUpdateCheck() to rebuild the cache, but that
+// requires implementing more AMO functionality in our test server.
+
+add_task(async function cleanup() {
+ // There's an xpcom-shutdown hook for this, but let's give this a shot.
+ reconciler.stopListening();
+});
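
The trust policy the syncability tests above pin down, as a sketch (assumed
logic, not the store's actual implementation): only https URIs whose host
appears in the addons.trustedSourceHostnames pref are trusted.

    function isSourceURITrustedSketch(uri, trustedHostnames) {
      if (!uri) {
        return false;
      }
      return uri.scheme === "https" && trustedHostnames.includes(uri.host);
    }

    // With trustedHostnames = ["addons.mozilla.org", "other.example.com"]:
    //   https://addons.mozilla.org/foo    -> trusted
    //   http://addons.mozilla.org/foo     -> rejected (not https)
    //   https://untrusted.example.com/foo -> rejected (host not listed)
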
diff --git a/services/sync/tests/unit/test_addons_tracker.js b/services/sync/tests/unit/test_addons_tracker.js
new file mode 100644
index 0000000000..f8473e4cfa
--- /dev/null
+++ b/services/sync/tests/unit/test_addons_tracker.js
@@ -0,0 +1,174 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+const { AddonsEngine } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/addons.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+AddonTestUtils.init(this);
+AddonTestUtils.createAppInfo(
+ "xpcshell@tests.mozilla.org",
+ "XPCShell",
+ "1",
+ "1.9.2"
+);
+AddonTestUtils.overrideCertDB();
+
+Services.prefs.setBoolPref("extensions.experiments.enabled", true);
+
+Svc.PrefBranch.setBoolPref("engine.addons", true);
+
+let reconciler;
+let tracker;
+
+const addon1ID = "addon1@tests.mozilla.org";
+
+const ADDONS = {
+ test_addon1: {
+ manifest: {
+ browser_specific_settings: { gecko: { id: addon1ID } },
+ },
+ },
+};
+
+const XPIS = {};
+
+async function cleanup() {
+ tracker.stop();
+
+ tracker.resetScore();
+ await tracker.clearChangedIDs();
+
+ reconciler._addons = {};
+ reconciler._changes = [];
+ await reconciler.saveState();
+}
+
+add_task(async function setup() {
+ await AddonTestUtils.promiseStartupManager();
+ for (let [name, data] of Object.entries(ADDONS)) {
+ XPIS[name] = AddonTestUtils.createTempWebExtensionFile(data);
+ }
+ await Service.engineManager.register(AddonsEngine);
+ let engine = Service.engineManager.get("addons");
+ reconciler = engine._reconciler;
+ tracker = engine._tracker;
+
+ await cleanup();
+});
+
+add_task(async function test_empty() {
+ _("Verify the tracker is empty to start with.");
+
+ Assert.equal(0, Object.keys(await tracker.getChangedIDs()).length);
+ Assert.equal(0, tracker.score);
+
+ await cleanup();
+});
+
+add_task(async function test_not_tracking() {
+ _("Ensures the tracker doesn't do anything when it isn't tracking.");
+
+ let addon = await installAddon(XPIS.test_addon1, reconciler);
+ await uninstallAddon(addon, reconciler);
+
+ Assert.equal(0, Object.keys(await tracker.getChangedIDs()).length);
+ Assert.equal(0, tracker.score);
+
+ await cleanup();
+});
+
+add_task(async function test_track_install() {
+ _("Ensure that installing an add-on notifies tracker.");
+
+ reconciler.startListening();
+
+ tracker.start();
+
+ Assert.equal(0, tracker.score);
+ let addon = await installAddon(XPIS.test_addon1, reconciler);
+ let changed = await tracker.getChangedIDs();
+
+ Assert.equal(1, Object.keys(changed).length);
+ Assert.ok(addon.syncGUID in changed);
+ Assert.equal(SCORE_INCREMENT_XLARGE, tracker.score);
+
+ await uninstallAddon(addon, reconciler);
+ await cleanup();
+});
+
+add_task(async function test_track_uninstall() {
+ _("Ensure that uninstalling an add-on notifies tracker.");
+
+ reconciler.startListening();
+
+ let addon = await installAddon(XPIS.test_addon1, reconciler);
+ let guid = addon.syncGUID;
+ Assert.equal(0, tracker.score);
+
+ tracker.start();
+
+ await uninstallAddon(addon, reconciler);
+ let changed = await tracker.getChangedIDs();
+ Assert.equal(1, Object.keys(changed).length);
+ Assert.ok(guid in changed);
+ Assert.equal(SCORE_INCREMENT_XLARGE, tracker.score);
+
+ await cleanup();
+});
+
+add_task(async function test_track_user_disable() {
+ _("Ensure that tracker sees disabling of add-on");
+
+ reconciler.startListening();
+
+ let addon = await installAddon(XPIS.test_addon1, reconciler);
+ Assert.ok(!addon.userDisabled);
+ Assert.ok(!addon.appDisabled);
+ Assert.ok(addon.isActive);
+
+ tracker.start();
+ Assert.equal(0, tracker.score);
+
+ _("Disabling add-on");
+ await addon.disable();
+ await reconciler.queueCaller.promiseCallsComplete();
+
+ let changed = await tracker.getChangedIDs();
+ Assert.equal(1, Object.keys(changed).length);
+ Assert.ok(addon.syncGUID in changed);
+ Assert.equal(SCORE_INCREMENT_XLARGE, tracker.score);
+
+ await uninstallAddon(addon, reconciler);
+ await cleanup();
+});
+
+add_task(async function test_track_enable() {
+ _("Ensure that enabling a disabled add-on notifies tracker.");
+
+ reconciler.startListening();
+
+ let addon = await installAddon(XPIS.test_addon1, reconciler);
+ await addon.disable();
+ await Async.promiseYield();
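+  // Yield so the disable notification settles before tracking starts; the
+  // disable must not count toward the score.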
+
+ Assert.equal(0, tracker.score);
+
+ tracker.start();
+ await addon.enable();
+ await Async.promiseYield();
+ await reconciler.queueCaller.promiseCallsComplete();
+
+ let changed = await tracker.getChangedIDs();
+ Assert.equal(1, Object.keys(changed).length);
+ Assert.ok(addon.syncGUID in changed);
+ Assert.equal(SCORE_INCREMENT_XLARGE, tracker.score);
+
+ await uninstallAddon(addon, reconciler);
+ await cleanup();
+});
diff --git a/services/sync/tests/unit/test_addons_validator.js b/services/sync/tests/unit/test_addons_validator.js
new file mode 100644
index 0000000000..60f2f8bf43
--- /dev/null
+++ b/services/sync/tests/unit/test_addons_validator.js
@@ -0,0 +1,65 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { AddonValidator } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/addons.sys.mjs"
+);
+
+function getDummyServerAndClient() {
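+  // Server records carry the sync GUID as "id"; client add-ons expose it
+  // as "syncGUID".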
+ return {
+ server: [
+ {
+ id: "1111",
+ applicationID: Services.appinfo.ID,
+ addonID: "synced-addon@example.com",
+ enabled: true,
+ source: "amo",
+ understood: true,
+ },
+ ],
+ client: [
+ {
+ syncGUID: "1111",
+ id: "synced-addon@example.com",
+ type: "extension",
+ isSystem: false,
+ isSyncable: true,
+ },
+ {
+ syncGUID: "2222",
+ id: "system-addon@example.com",
+ type: "extension",
+ isSystem: true,
+ isSyncable: false,
+ },
+ {
+        // Plugins don't have a `syncGUID`, but we don't sync them, so we
+        // shouldn't report them as client duplicates.
+ id: "some-plugin",
+ type: "plugin",
+ },
+ {
+ id: "another-plugin",
+ type: "plugin",
+ },
+ ],
+ };
+}
+
+add_task(async function test_valid() {
+ let { server, client } = getDummyServerAndClient();
+ let validator = new AddonValidator({
+ _findDupe(item) {
+ return null;
+ },
+ isAddonSyncable(item) {
+ return item.type != "plugin";
+ },
+ });
+ let { problemData, clientRecords, records, deletedRecords } =
+ await validator.compareClientWithServer(client, server);
+ equal(clientRecords.length, 4);
+ equal(records.length, 1);
+ equal(deletedRecords.length, 0);
+ deepEqual(problemData, validator.emptyProblemData());
+});
diff --git a/services/sync/tests/unit/test_bookmark_batch_fail.js b/services/sync/tests/unit/test_bookmark_batch_fail.js
new file mode 100644
index 0000000000..9644d730e4
--- /dev/null
+++ b/services/sync/tests/unit/test_bookmark_batch_fail.js
@@ -0,0 +1,25 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+_("Making sure a failing sync reports a useful error");
+// `Service` is used as a global in head_helpers.js.
+// eslint-disable-next-line no-unused-vars
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+add_bookmark_test(async function run_test(engine) {
+ await engine.initialize();
+ engine._syncStartup = async function () {
+ throw new Error("FAIL!");
+ };
+
+ try {
+ _("Try calling the sync that should throw right away");
+ await engine._sync();
+ do_throw("Should have failed sync!");
+ } catch (ex) {
+ _("Making sure what we threw ended up as the exception:", ex);
+ Assert.equal(ex.message, "FAIL!");
+ }
+});
diff --git a/services/sync/tests/unit/test_bookmark_decline_undecline.js b/services/sync/tests/unit/test_bookmark_decline_undecline.js
new file mode 100644
index 0000000000..12139dd163
--- /dev/null
+++ b/services/sync/tests/unit/test_bookmark_decline_undecline.js
@@ -0,0 +1,48 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+// A stored reference to the collection won't be valid after disabling.
+function getBookmarkWBO(server, guid) {
+ let coll = server.user("foo").collection("bookmarks");
+ if (!coll) {
+ return null;
+ }
+ return coll.wbo(guid);
+}
+
+add_task(async function test_decline_undecline() {
+ let engine = Service.engineManager.get("bookmarks");
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ try {
+ let { guid: bzGuid } = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "https://bugzilla.mozilla.org",
+ index: PlacesUtils.bookmarks.DEFAULT_INDEX,
+ title: "bugzilla",
+ });
+
+ ok(!getBookmarkWBO(server, bzGuid), "Shouldn't have been uploaded yet");
+ await Service.sync();
+ ok(getBookmarkWBO(server, bzGuid), "Should be present on server");
+
+ engine.enabled = false;
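+    // Disabling the engine declines it; the next sync removes its
+    // collection from the server.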
+ await Service.sync();
+ ok(
+ !getBookmarkWBO(server, bzGuid),
+ "Shouldn't be present on server anymore"
+ );
+
+ engine.enabled = true;
+ await Service.sync();
+ ok(getBookmarkWBO(server, bzGuid), "Should be present on server again");
+ } finally {
+ await PlacesSyncUtils.bookmarks.reset();
+ await promiseStopServer(server);
+ }
+});
diff --git a/services/sync/tests/unit/test_bookmark_engine.js b/services/sync/tests/unit/test_bookmark_engine.js
new file mode 100644
index 0000000000..6274a6b836
--- /dev/null
+++ b/services/sync/tests/unit/test_bookmark_engine.js
@@ -0,0 +1,1555 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { BookmarkHTMLUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/BookmarkHTMLUtils.sys.mjs"
+);
+const { BookmarkJSONUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/BookmarkJSONUtils.sys.mjs"
+);
+const { Bookmark, BookmarkFolder, BookmarksEngine, Livemark } =
+ ChromeUtils.importESModule(
+ "resource://services-sync/engines/bookmarks.sys.mjs"
+ );
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { SyncedRecordsTelemetry } = ChromeUtils.importESModule(
+ "resource://services-sync/telemetry.sys.mjs"
+);
+
+var recordedEvents = [];
+
+function checkRecordedEvents(object, expected, message) {
+ // Ignore event telemetry from the merger.
+ let checkEvents = recordedEvents.filter(event => event.object == object);
+ deepEqual(checkEvents, expected, message);
+ // and clear the list so future checks are easier to write.
+ recordedEvents = [];
+}
+
+async function fetchAllRecordIds() {
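+  // Walk the four syncable roots (menu, toolbar, unfiled, mobile) and
+  // collect the record ID of every descendant bookmark.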
+ let db = await PlacesUtils.promiseDBConnection();
+ let rows = await db.executeCached(`
+ WITH RECURSIVE
+ syncedItems(id, guid) AS (
+ SELECT b.id, b.guid FROM moz_bookmarks b
+ WHERE b.guid IN ('menu________', 'toolbar_____', 'unfiled_____',
+ 'mobile______')
+ UNION ALL
+ SELECT b.id, b.guid FROM moz_bookmarks b
+ JOIN syncedItems s ON b.parent = s.id
+ )
+ SELECT guid FROM syncedItems`);
+ let recordIds = new Set();
+ for (let row of rows) {
+ let recordId = PlacesSyncUtils.bookmarks.guidToRecordId(
+ row.getResultByName("guid")
+ );
+ recordIds.add(recordId);
+ }
+ return recordIds;
+}
+
+async function cleanupEngine(engine) {
+ await engine.resetClient();
+ await engine._store.wipe();
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ Service.recordManager.clearCache();
+ // Note we don't finalize the engine here as add_bookmark_test() does.
+}
+
+async function cleanup(engine, server) {
+ await promiseStopServer(server);
+ await cleanupEngine(engine);
+}
+
+add_task(async function setup() {
+ await generateNewKeys(Service.collectionKeys);
+ await Service.engineManager.unregister("bookmarks");
+
+ Service.recordTelemetryEvent = (object, method, value, extra = undefined) => {
+ recordedEvents.push({ object, method, value, extra });
+ };
+});
+
+add_task(async function test_buffer_timeout() {
+ await Service.recordManager.clearCache();
+ await PlacesSyncUtils.bookmarks.reset();
+ let engine = new BookmarksEngine(Service);
+ engine._newWatchdog = function () {
+ // Return an already-aborted watchdog, so that we can abort merges
+ // immediately.
+ let watchdog = Async.watchdog();
+ watchdog.controller.abort();
+ return watchdog;
+ };
+ await engine.initialize();
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ let collection = server.user("foo").collection("bookmarks");
+
+ try {
+ info("Insert local bookmarks");
+ await PlacesUtils.bookmarks.insertTree({
+ guid: PlacesUtils.bookmarks.unfiledGuid,
+ children: [
+ {
+ guid: "bookmarkAAAA",
+ url: "http://example.com/a",
+ title: "A",
+ },
+ {
+ guid: "bookmarkBBBB",
+ url: "http://example.com/b",
+ title: "B",
+ },
+ ],
+ });
+
+ info("Insert remote bookmarks");
+ collection.insert(
+ "menu",
+ encryptPayload({
+ id: "menu",
+ type: "folder",
+ parentid: "places",
+ title: "menu",
+ children: ["bookmarkCCCC", "bookmarkDDDD"],
+ })
+ );
+ collection.insert(
+ "bookmarkCCCC",
+ encryptPayload({
+ id: "bookmarkCCCC",
+ type: "bookmark",
+ parentid: "menu",
+ bmkUri: "http://example.com/c",
+ title: "C",
+ })
+ );
+ collection.insert(
+ "bookmarkDDDD",
+ encryptPayload({
+ id: "bookmarkDDDD",
+ type: "bookmark",
+ parentid: "menu",
+ bmkUri: "http://example.com/d",
+ title: "D",
+ })
+ );
+
+ info("We expect this sync to fail");
+ await Assert.rejects(
+ sync_engine_and_validate_telem(engine, true),
+ ex => ex.name == "InterruptedError"
+ );
+ } finally {
+ await cleanup(engine, server);
+ await engine.finalize();
+ }
+});
+
+add_bookmark_test(async function test_maintenance_after_failure(engine) {
+ _("Ensure we try to run maintenance if the engine fails to sync");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ try {
+ let syncStartup = engine._syncStartup;
+ let syncError = new Error("Something is rotten in the state of Places");
+ engine._syncStartup = function () {
+ throw syncError;
+ };
+
+ Services.prefs.clearUserPref("places.database.lastMaintenance");
+
+ _("Ensure the sync fails and we run maintenance");
+ await Assert.rejects(
+ sync_engine_and_validate_telem(engine, true),
+ ex => ex == syncError
+ );
+ checkRecordedEvents(
+ "maintenance",
+ [
+ {
+ object: "maintenance",
+ method: "run",
+ value: "bookmarks",
+ extra: undefined,
+ },
+ ],
+ "Should record event for first maintenance run"
+ );
+
+ _("Sync again, but ensure maintenance doesn't run");
+ await Assert.rejects(
+ sync_engine_and_validate_telem(engine, true),
+ ex => ex == syncError
+ );
+ checkRecordedEvents(
+ "maintenance",
+ [],
+ "Should not record event if maintenance didn't run"
+ );
+
+ _("Fast-forward last maintenance pref; ensure maintenance runs");
+ Services.prefs.setIntPref(
+ "places.database.lastMaintenance",
+ Date.now() / 1000 - 14400
+ );
+ await Assert.rejects(
+ sync_engine_and_validate_telem(engine, true),
+ ex => ex == syncError
+ );
+ checkRecordedEvents(
+ "maintenance",
+ [
+ {
+ object: "maintenance",
+ method: "run",
+ value: "bookmarks",
+ extra: undefined,
+ },
+ ],
+ "Should record event for second maintenance run"
+ );
+
+ _("Fix sync failure; ensure we report success after maintenance");
+ engine._syncStartup = syncStartup;
+ await sync_engine_and_validate_telem(engine, false);
+ checkRecordedEvents(
+ "maintenance",
+ [
+ {
+ object: "maintenance",
+ method: "fix",
+ value: "bookmarks",
+ extra: undefined,
+ },
+ ],
+ "Should record event for successful sync after second maintenance"
+ );
+
+ await sync_engine_and_validate_telem(engine, false);
+ checkRecordedEvents(
+ "maintenance",
+ [],
+ "Should not record maintenance events after successful sync"
+ );
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_bookmark_test(async function test_delete_invalid_roots_from_server(engine) {
+ _("Ensure that we delete the Places and Reading List roots from the server.");
+
+ enableValidationPrefs();
+
+ let store = engine._store;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.user("foo").collection("bookmarks");
+
+ engine._tracker.start();
+
+ try {
+ let placesRecord = await store.createRecord("places");
+ collection.insert("places", encryptPayload(placesRecord.cleartext));
+
+ let listBmk = new Bookmark("bookmarks", Utils.makeGUID());
+ listBmk.bmkUri = "https://example.com";
+ listBmk.title = "Example reading list entry";
+ listBmk.parentName = "Reading List";
+ listBmk.parentid = "readinglist";
+ collection.insert(listBmk.id, encryptPayload(listBmk.cleartext));
+
+ let readingList = new BookmarkFolder("bookmarks", "readinglist");
+ readingList.title = "Reading List";
+ readingList.children = [listBmk.id];
+ readingList.parentName = "";
+ readingList.parentid = "places";
+ collection.insert("readinglist", encryptPayload(readingList.cleartext));
+
+ // Note that we don't insert a record for the toolbar, so the engine will
+ // report a parent-child disagreement, since Firefox's `parentid` is
+ // `toolbar`.
+ let newBmk = new Bookmark("bookmarks", Utils.makeGUID());
+ newBmk.bmkUri = "http://getfirefox.com";
+ newBmk.title = "Get Firefox!";
+ newBmk.parentName = "Bookmarks Toolbar";
+ newBmk.parentid = "toolbar";
+ collection.insert(newBmk.id, encryptPayload(newBmk.cleartext));
+
+ deepEqual(
+ collection.keys().sort(),
+ ["places", "readinglist", listBmk.id, newBmk.id].sort(),
+ "Should store Places root, reading list items, and new bookmark on server"
+ );
+
+ let ping = await sync_engine_and_validate_telem(engine, true);
+ // In a real sync, the engine is named `bookmarks-buffered`.
+ // However, `sync_engine_and_validate_telem` simulates a sync where
+ // the engine isn't registered with the engine manager, so the recorder
+ // doesn't see its `overrideTelemetryName`.
+ let engineData = ping.engines.find(e => e.name == "bookmarks");
+ ok(engineData.validation, "Bookmarks engine should always run validation");
+ equal(
+ engineData.validation.checked,
+ 6,
+ "Bookmarks engine should validate all items"
+ );
+ deepEqual(
+ engineData.validation.problems,
+ [
+ {
+ name: "parentChildDisagreements",
+ count: 1,
+ },
+ ],
+ "Bookmarks engine should report parent-child disagreement"
+ );
+ deepEqual(
+ engineData.steps.map(step => step.name),
+ [
+ "fetchLocalTree",
+ "fetchRemoteTree",
+ "merge",
+ "apply",
+ "notifyObservers",
+ "fetchLocalChangeRecords",
+ ],
+ "Bookmarks engine should report all merge steps"
+ );
+
+ deepEqual(
+ collection.keys().sort(),
+ ["menu", "mobile", "toolbar", "unfiled", newBmk.id].sort(),
+ "Should remove Places root and reading list items from server; upload local roots"
+ );
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_bookmark_test(async function test_processIncoming_error_orderChildren(
+ engine
+) {
+ _(
+ "Ensure that _orderChildren() is called even when _processIncoming() throws an error."
+ );
+
+ let store = engine._store;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.user("foo").collection("bookmarks");
+
+ try {
+ let folder1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ title: "Folder 1",
+ });
+
+ let bmk1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder1.guid,
+ url: "http://getfirefox.com/",
+ title: "Get Firefox!",
+ });
+ let bmk2 = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder1.guid,
+ url: "http://getthunderbird.com/",
+ title: "Get Thunderbird!",
+ });
+
+ let toolbar_record = await store.createRecord("toolbar");
+ collection.insert("toolbar", encryptPayload(toolbar_record.cleartext));
+
+ let bmk1_record = await store.createRecord(bmk1.guid);
+ collection.insert(bmk1.guid, encryptPayload(bmk1_record.cleartext));
+
+ let bmk2_record = await store.createRecord(bmk2.guid);
+ collection.insert(bmk2.guid, encryptPayload(bmk2_record.cleartext));
+
+ // Create a server record for folder1 where we flip the order of
+ // the children.
+ let folder1_record = await store.createRecord(folder1.guid);
+ let folder1_payload = folder1_record.cleartext;
+ folder1_payload.children.reverse();
+ collection.insert(folder1.guid, encryptPayload(folder1_payload));
+
+ // Create a bogus record that when synced down will provoke a
+ // network error which in turn provokes an exception in _processIncoming.
+ const BOGUS_GUID = "zzzzzzzzzzzz";
+ let bogus_record = collection.insert(BOGUS_GUID, "I'm a bogus record!");
+ bogus_record.get = function get() {
+ throw new Error("Sync this!");
+ };
+
+    // Make the record 10 minutes old so it will only be synced in the
+    // toFetch phase.
+ bogus_record.modified = new_timestamp() - 60 * 10;
+ await engine.setLastSync(new_timestamp() - 60);
+ engine.toFetch = new SerializableSet([BOGUS_GUID]);
+
+ let error;
+ try {
+ await sync_engine_and_validate_telem(engine, true);
+ } catch (ex) {
+ error = ex;
+ }
+ ok(!!error);
+
+ // Verify that the bookmark order has been applied.
+ folder1_record = await store.createRecord(folder1.guid);
+ let new_children = folder1_record.children;
+ Assert.deepEqual(
+ new_children.sort(),
+ [folder1_payload.children[0], folder1_payload.children[1]].sort()
+ );
+
+ let localChildIds = await PlacesSyncUtils.bookmarks.fetchChildRecordIds(
+ folder1.guid
+ );
+ Assert.deepEqual(localChildIds.sort(), [bmk2.guid, bmk1.guid].sort());
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_bookmark_test(async function test_restorePromptsReupload(engine) {
+ await test_restoreOrImport(engine, { replace: true });
+});
+
+add_bookmark_test(async function test_importPromptsReupload(engine) {
+ await test_restoreOrImport(engine, { replace: false });
+});
+
+// Test a JSON restore or HTML import. Use JSON if `replace` is `true`, or
+// HTML otherwise.
+async function test_restoreOrImport(engine, { replace }) {
+ let verb = replace ? "restore" : "import";
+ let verbing = replace ? "restoring" : "importing";
+ let bookmarkUtils = replace ? BookmarkJSONUtils : BookmarkHTMLUtils;
+
+ _(`Ensure that ${verbing} from a backup will reupload all records.`);
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.user("foo").collection("bookmarks");
+
+ engine._tracker.start(); // We skip usual startup...
+
+ try {
+ let folder1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ title: "Folder 1",
+ });
+
+ _("Create a single record.");
+ let bmk1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder1.guid,
+ url: "http://getfirefox.com/",
+ title: "Get Firefox!",
+ });
+ _(`Get Firefox!: ${bmk1.guid}`);
+
+ let backupFilePath = PathUtils.join(
+ PathUtils.tempDir,
+ `t_b_e_${Date.now()}.json`
+ );
+
+ _("Make a backup.");
+
+ await bookmarkUtils.exportToFile(backupFilePath);
+
+ _("Create a different record and sync.");
+ let bmk2 = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder1.guid,
+ url: "http://getthunderbird.com/",
+ title: "Get Thunderbird!",
+ });
+ _(`Get Thunderbird!: ${bmk2.guid}`);
+
+ await PlacesUtils.bookmarks.remove(bmk1.guid);
+
+ let error;
+ try {
+ await sync_engine_and_validate_telem(engine, false);
+ } catch (ex) {
+ error = ex;
+ _("Got error: " + Log.exceptionStr(ex));
+ }
+ Assert.ok(!error);
+
+ _(
+ "Verify that there's only one bookmark on the server, and it's Thunderbird."
+ );
+ // Of course, there's also the Bookmarks Toolbar and Bookmarks Menu...
+ let wbos = collection.keys(function (id) {
+ return !["menu", "toolbar", "mobile", "unfiled", folder1.guid].includes(
+ id
+ );
+ });
+ Assert.equal(wbos.length, 1);
+ Assert.equal(wbos[0], bmk2.guid);
+
+ _(`Now ${verb} from a backup.`);
+ await bookmarkUtils.importFromFile(backupFilePath, { replace });
+
+ // If `replace` is `true`, we'll wipe the server on the next sync.
+ let bookmarksCollection = server.user("foo").collection("bookmarks");
+ _("Verify that we didn't wipe the server.");
+ Assert.ok(!!bookmarksCollection);
+
+ _("Ensure we have the bookmarks we expect locally.");
+ let recordIds = await fetchAllRecordIds();
+ _("GUIDs: " + JSON.stringify([...recordIds]));
+
+ let bookmarkRecordIds = new Map();
+ let count = 0;
+ for (let recordId of recordIds) {
+ count++;
+ let info = await PlacesUtils.bookmarks.fetch(
+ PlacesSyncUtils.bookmarks.recordIdToGuid(recordId)
+ );
+ // Only one bookmark, so _all_ should be Firefox!
+ if (info.type == PlacesUtils.bookmarks.TYPE_BOOKMARK) {
+ _(`Found URI ${info.url.href} for record ID ${recordId}`);
+ bookmarkRecordIds.set(info.url.href, recordId);
+ }
+ }
+ Assert.ok(bookmarkRecordIds.has("http://getfirefox.com/"));
+ if (!replace) {
+ Assert.ok(bookmarkRecordIds.has("http://getthunderbird.com/"));
+ }
+
+ _("Have the correct number of IDs locally, too.");
+ let expectedResults = [
+ "menu",
+ "toolbar",
+ "mobile",
+ "unfiled",
+ folder1.guid,
+ bmk1.guid,
+ ];
+ if (!replace) {
+ expectedResults.push("toolbar", folder1.guid, bmk2.guid);
+ }
+ Assert.equal(count, expectedResults.length);
+
+ _("Sync again. This'll wipe bookmarks from the server.");
+ try {
+ await sync_engine_and_validate_telem(engine, false);
+ } catch (ex) {
+ error = ex;
+ _("Got error: " + Log.exceptionStr(ex));
+ }
+ Assert.ok(!error);
+
+ _("Verify that there's the right bookmarks on the server.");
+ // Of course, there's also the Bookmarks Toolbar and Bookmarks Menu...
+ let payloads = server.user("foo").collection("bookmarks").payloads();
+ let bookmarkWBOs = payloads.filter(function (wbo) {
+ return wbo.type == "bookmark";
+ });
+
+ let folderWBOs = payloads.filter(function (wbo) {
+ return (
+ wbo.type == "folder" &&
+ wbo.id != "menu" &&
+ wbo.id != "toolbar" &&
+ wbo.id != "unfiled" &&
+ wbo.id != "mobile" &&
+ wbo.parentid != "menu"
+ );
+ });
+
+ let expectedFX = {
+ id: bookmarkRecordIds.get("http://getfirefox.com/"),
+ bmkUri: "http://getfirefox.com/",
+ title: "Get Firefox!",
+ };
+ let expectedTB = {
+ id: bookmarkRecordIds.get("http://getthunderbird.com/"),
+ bmkUri: "http://getthunderbird.com/",
+ title: "Get Thunderbird!",
+ };
+
+ let expectedBookmarks;
+ if (replace) {
+ expectedBookmarks = [expectedFX];
+ } else {
+ expectedBookmarks = [expectedTB, expectedFX];
+ }
+
+ doCheckWBOs(bookmarkWBOs, expectedBookmarks);
+
+ _("Our old friend Folder 1 is still in play.");
+ let expectedFolder1 = { title: "Folder 1" };
+
+ let expectedFolders;
+ if (replace) {
+ expectedFolders = [expectedFolder1];
+ } else {
+ expectedFolders = [expectedFolder1, expectedFolder1];
+ }
+
+ doCheckWBOs(folderWBOs, expectedFolders);
+ } finally {
+ await cleanup(engine, server);
+ }
+}
+
+function doCheckWBOs(WBOs, expected) {
+ Assert.equal(WBOs.length, expected.length);
+ for (let i = 0; i < expected.length; i++) {
+ let lhs = WBOs[i];
+ let rhs = expected[i];
+ if ("id" in rhs) {
+ Assert.equal(lhs.id, rhs.id);
+ }
+ if ("bmkUri" in rhs) {
+ Assert.equal(lhs.bmkUri, rhs.bmkUri);
+ }
+ if ("title" in rhs) {
+ Assert.equal(lhs.title, rhs.title);
+ }
+ }
+}
+
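+// A minimal stand-in for a real record: copies the given fields onto itself
+// and borrows the prototype's cleartext defaults and conversion helpers.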
+function FakeRecord(constructor, r) {
+ this.defaultCleartext = constructor.prototype.defaultCleartext;
+ constructor.call(this, "bookmarks", r.id);
+ for (let x in r) {
+ this[x] = r[x];
+ }
+ // Borrow the constructor's conversion functions.
+ this.toSyncBookmark = constructor.prototype.toSyncBookmark;
+ this.cleartextToString = constructor.prototype.cleartextToString;
+}
+
+// Bug 632287.
+// (Note that `test_mismatched_folder_types()` in
+// toolkit/components/places/tests/sync/test_bookmark_kinds.js is an exact
+// copy of this test, so it's fine to remove it as part of bug 1449730)
+add_task(async function test_mismatched_types() {
+ _(
+ "Ensure that handling a record that changes type causes deletion " +
+ "then re-adding."
+ );
+
+ let oldRecord = {
+ id: "l1nZZXfB8nC7",
+ type: "folder",
+ parentName: "Bookmarks Toolbar",
+ title: "Innerst i Sneglehode",
+ description: null,
+ parentid: "toolbar",
+ };
+
+ let newRecord = {
+ id: "l1nZZXfB8nC7",
+ type: "livemark",
+ siteUri: "http://sneglehode.wordpress.com/",
+ feedUri: "http://sneglehode.wordpress.com/feed/",
+ parentName: "Bookmarks Toolbar",
+ title: "Innerst i Sneglehode",
+ description: null,
+ children: [
+ "HCRq40Rnxhrd",
+ "YeyWCV1RVsYw",
+ "GCceVZMhvMbP",
+ "sYi2hevdArlF",
+ "vjbZlPlSyGY8",
+ "UtjUhVyrpeG6",
+ "rVq8WMG2wfZI",
+ "Lx0tcy43ZKhZ",
+ "oT74WwV8_j4P",
+ "IztsItWVSo3-",
+ ],
+ parentid: "toolbar",
+ };
+
+ let engine = new BookmarksEngine(Service);
+ await engine.initialize();
+ let store = engine._store;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ try {
+ let oldR = new FakeRecord(BookmarkFolder, oldRecord);
+ let newR = new FakeRecord(Livemark, newRecord);
+ oldR.parentid = PlacesUtils.bookmarks.toolbarGuid;
+ newR.parentid = PlacesUtils.bookmarks.toolbarGuid;
+
+ await store.applyIncoming(oldR);
+ await engine._apply();
+ _("Applied old. It's a folder.");
+ let oldID = await PlacesTestUtils.promiseItemId(oldR.id);
+ _("Old ID: " + oldID);
+ let oldInfo = await PlacesUtils.bookmarks.fetch(oldR.id);
+ Assert.equal(oldInfo.type, PlacesUtils.bookmarks.TYPE_FOLDER);
+
+ await store.applyIncoming(newR);
+ await engine._apply();
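+    // The type change is handled by deleting the folder and re-adding the
+    // livemark; getting here without an exception is what's under test.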
+ } finally {
+ await cleanup(engine, server);
+ await engine.finalize();
+ }
+});
+
+add_bookmark_test(async function test_misreconciled_root(engine) {
+ _("Ensure that we don't reconcile an arbitrary record with a root.");
+
+ let store = engine._store;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ // Log real hard for this test.
+ store._log.trace = store._log.debug;
+ engine._log.trace = engine._log.debug;
+
+ await engine._syncStartup();
+
+ // Let's find out where the toolbar is right now.
+ let toolbarBefore = await store.createRecord("toolbar", "bookmarks");
+ let toolbarIDBefore = await PlacesTestUtils.promiseItemId(
+ PlacesUtils.bookmarks.toolbarGuid
+ );
+ Assert.notEqual(-1, toolbarIDBefore);
+
+ let parentRecordIDBefore = toolbarBefore.parentid;
+ let parentGUIDBefore =
+ PlacesSyncUtils.bookmarks.recordIdToGuid(parentRecordIDBefore);
+ let parentIDBefore = await PlacesTestUtils.promiseItemId(parentGUIDBefore);
+ Assert.equal("string", typeof parentGUIDBefore);
+
+ _("Current parent: " + parentGUIDBefore + " (" + parentIDBefore + ").");
+
+ let to_apply = {
+ id: "zzzzzzzzzzzz",
+ type: "folder",
+ title: "Bookmarks Toolbar",
+ description: "Now you're for it.",
+ parentName: "",
+ parentid: "mobile", // Why not?
+ children: [],
+ };
+
+ let rec = new FakeRecord(BookmarkFolder, to_apply);
+
+ _("Applying record.");
+ let countTelemetry = new SyncedRecordsTelemetry();
+ await store.applyIncomingBatch([rec], countTelemetry);
+
+ // Ensure that afterwards, toolbar is still there.
+ // As of 2012-12-05, this only passes because Places doesn't use "toolbar" as
+ // the real GUID, instead using a generated one. Sync does the translation.
+ let toolbarAfter = await store.createRecord("toolbar", "bookmarks");
+ let parentRecordIDAfter = toolbarAfter.parentid;
+ let parentGUIDAfter =
+ PlacesSyncUtils.bookmarks.recordIdToGuid(parentRecordIDAfter);
+ let parentIDAfter = await PlacesTestUtils.promiseItemId(parentGUIDAfter);
+ Assert.equal(
+ await PlacesTestUtils.promiseItemGuid(toolbarIDBefore),
+ PlacesUtils.bookmarks.toolbarGuid
+ );
+ Assert.equal(parentGUIDBefore, parentGUIDAfter);
+ Assert.equal(parentIDBefore, parentIDAfter);
+
+ await cleanup(engine, server);
+});
+
+add_bookmark_test(async function test_invalid_url(engine) {
+ _("Ensure an incoming invalid bookmark URL causes an outgoing tombstone.");
+
+ let server = await serverForFoo(engine);
+ let collection = server.user("foo").collection("bookmarks");
+
+ await SyncTestingInfrastructure(server);
+ await engine._syncStartup();
+
+  // Check that the URL really is invalid.
+ let url = "https://www.42registry.42/";
+ Assert.throws(() => Services.io.newURI(url), /invalid/);
+
+ let guid = "abcdefabcdef";
+
+ let toolbar = new BookmarkFolder("bookmarks", "toolbar");
+ toolbar.title = "toolbar";
+ toolbar.parentName = "";
+ toolbar.parentid = "places";
+ toolbar.children = [guid];
+ collection.insert("toolbar", encryptPayload(toolbar.cleartext));
+
+ let item1 = new Bookmark("bookmarks", guid);
+ item1.bmkUri = "https://www.42registry.42/";
+ item1.title = "invalid url";
+ item1.parentName = "Bookmarks Toolbar";
+ item1.parentid = "toolbar";
+ item1.dateAdded = 1234;
+ collection.insert(guid, encryptPayload(item1.cleartext));
+
+ _("syncing.");
+ await sync_engine_and_validate_telem(engine, false);
+
+ // We should find the record now exists on the server as a tombstone.
+ let updated = collection.cleartext(guid);
+ Assert.ok(updated.deleted, "record was deleted");
+
+ let local = await PlacesUtils.bookmarks.fetch(guid);
+ Assert.deepEqual(local, null, "no local bookmark exists");
+
+ await cleanup(engine, server);
+});
+
+add_bookmark_test(async function test_sync_dateAdded(engine) {
+ await Service.recordManager.clearCache();
+ await PlacesSyncUtils.bookmarks.reset();
+ let store = engine._store;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.user("foo").collection("bookmarks");
+
+  // TODO: Avoid random orange (bug 1374599); this is only necessary
+  // intermittently. Reset the last sync date so that we'll get all bookmarks.
+ await engine.setLastSync(1);
+
+ engine._tracker.start(); // We skip usual startup...
+
+ // Just matters that it's in the past, not how far.
+ let now = Date.now();
+ let oneYearMS = 365 * 24 * 60 * 60 * 1000;
+
+ try {
+ let toolbar = new BookmarkFolder("bookmarks", "toolbar");
+ toolbar.title = "toolbar";
+ toolbar.parentName = "";
+ toolbar.parentid = "places";
+ toolbar.children = [
+ "abcdefabcdef",
+ "aaaaaaaaaaaa",
+ "bbbbbbbbbbbb",
+ "cccccccccccc",
+ "dddddddddddd",
+ "eeeeeeeeeeee",
+ ];
+ collection.insert("toolbar", encryptPayload(toolbar.cleartext));
+
+ let item1GUID = "abcdefabcdef";
+ let item1 = new Bookmark("bookmarks", item1GUID);
+ item1.bmkUri = "https://example.com";
+ item1.title = "asdf";
+ item1.parentName = "Bookmarks Toolbar";
+ item1.parentid = "toolbar";
+ item1.dateAdded = now - oneYearMS;
+ collection.insert(item1GUID, encryptPayload(item1.cleartext));
+
+ let item2GUID = "aaaaaaaaaaaa";
+ let item2 = new Bookmark("bookmarks", item2GUID);
+ item2.bmkUri = "https://example.com/2";
+ item2.title = "asdf2";
+ item2.parentName = "Bookmarks Toolbar";
+ item2.parentid = "toolbar";
+ item2.dateAdded = now + oneYearMS;
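+    // Server modified times are in seconds; dateAdded is in milliseconds.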
+ const item2LastModified = now / 1000 - 100;
+ collection.insert(
+ item2GUID,
+ encryptPayload(item2.cleartext),
+ item2LastModified
+ );
+
+ let item3GUID = "bbbbbbbbbbbb";
+ let item3 = new Bookmark("bookmarks", item3GUID);
+ item3.bmkUri = "https://example.com/3";
+ item3.title = "asdf3";
+ item3.parentName = "Bookmarks Toolbar";
+ item3.parentid = "toolbar";
+ // no dateAdded
+ collection.insert(item3GUID, encryptPayload(item3.cleartext));
+
+ let item4GUID = "cccccccccccc";
+ let item4 = new Bookmark("bookmarks", item4GUID);
+ item4.bmkUri = "https://example.com/4";
+ item4.title = "asdf4";
+ item4.parentName = "Bookmarks Toolbar";
+ item4.parentid = "toolbar";
+ // no dateAdded, but lastModified in past
+ const item4LastModified = (now - oneYearMS) / 1000;
+ collection.insert(
+ item4GUID,
+ encryptPayload(item4.cleartext),
+ item4LastModified
+ );
+
+ let item5GUID = "dddddddddddd";
+ let item5 = new Bookmark("bookmarks", item5GUID);
+ item5.bmkUri = "https://example.com/5";
+ item5.title = "asdf5";
+ item5.parentName = "Bookmarks Toolbar";
+ item5.parentid = "toolbar";
+ // no dateAdded, lastModified in (near) future.
+ const item5LastModified = (now + 60000) / 1000;
+ collection.insert(
+ item5GUID,
+ encryptPayload(item5.cleartext),
+ item5LastModified
+ );
+
+ let item6GUID = "eeeeeeeeeeee";
+ let item6 = new Bookmark("bookmarks", item6GUID);
+ item6.bmkUri = "https://example.com/6";
+ item6.title = "asdf6";
+ item6.parentName = "Bookmarks Toolbar";
+ item6.parentid = "toolbar";
+ const item6LastModified = (now - oneYearMS) / 1000;
+ collection.insert(
+ item6GUID,
+ encryptPayload(item6.cleartext),
+ item6LastModified
+ );
+
+ await sync_engine_and_validate_telem(engine, false);
+
+ let record1 = await store.createRecord(item1GUID);
+ let record2 = await store.createRecord(item2GUID);
+
+ equal(
+ item1.dateAdded,
+ record1.dateAdded,
+ "dateAdded in past should be synced"
+ );
+ equal(
+ record2.dateAdded,
+ item2LastModified * 1000,
+ "dateAdded in future should be ignored in favor of last modified"
+ );
+
+ let record3 = await store.createRecord(item3GUID);
+
+ ok(record3.dateAdded);
+    // Make sure it's within 24 hours of the right timestamp... This is a
+    // little dodgy, but we only really care that it's basically accurate and
+    // has the right day.
+ ok(Math.abs(Date.now() - record3.dateAdded) < 24 * 60 * 60 * 1000);
+
+ let record4 = await store.createRecord(item4GUID);
+ equal(
+ record4.dateAdded,
+ item4LastModified * 1000,
+ "If no dateAdded is provided, lastModified should be used"
+ );
+
+ let record5 = await store.createRecord(item5GUID);
+ equal(
+ record5.dateAdded,
+ item5LastModified * 1000,
+ "If no dateAdded is provided, lastModified should be used (even if it's in the future)"
+ );
+
+ // Update item2 and try resyncing it.
+ item2.dateAdded = now - 100000;
+ collection.insert(
+ item2GUID,
+ encryptPayload(item2.cleartext),
+ now / 1000 - 50
+ );
+
+    // Also, add a local bookmark and make sure its dateAdded makes it to
+    // the server.
+ let bz = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "https://bugzilla.mozilla.org/",
+ title: "Bugzilla",
+ });
+
+    // The last sync did a POST, which doesn't advance its lastModified value.
+ // Next sync of the engine doesn't hit info/collections, so lastModified
+ // remains stale. Setting it to null side-steps that.
+ engine.lastModified = null;
+ await sync_engine_and_validate_telem(engine, false);
+
+ let newRecord2 = await store.createRecord(item2GUID);
+ equal(
+ newRecord2.dateAdded,
+ item2.dateAdded,
+ "dateAdded update should work for earlier date"
+ );
+
+ let bzWBO = collection.cleartext(bz.guid);
+ ok(bzWBO.dateAdded, "Locally added dateAdded lost");
+
+ let localRecord = await store.createRecord(bz.guid);
+ equal(
+ bzWBO.dateAdded,
+ localRecord.dateAdded,
+ "dateAdded should not change during upload"
+ );
+
+ item2.dateAdded += 10000;
+ collection.insert(
+ item2GUID,
+ encryptPayload(item2.cleartext),
+ now / 1000 - 10
+ );
+
+ engine.lastModified = null;
+ await sync_engine_and_validate_telem(engine, false);
+
+ let newerRecord2 = await store.createRecord(item2GUID);
+ equal(
+ newerRecord2.dateAdded,
+ newRecord2.dateAdded,
+ "dateAdded update should be ignored for later date if we know an earlier one "
+ );
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_buffer_hasDupe() {
+ await Service.recordManager.clearCache();
+ await PlacesSyncUtils.bookmarks.reset();
+ let engine = new BookmarksEngine(Service);
+ await engine.initialize();
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ let collection = server.user("foo").collection("bookmarks");
+ engine._tracker.start(); // We skip usual startup...
+ try {
+ let guid1 = Utils.makeGUID();
+ let guid2 = Utils.makeGUID();
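+    // Two bookmarks with the same URL, so each one is a potential dupe of
+    // the other.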
+ await PlacesUtils.bookmarks.insert({
+ guid: guid1,
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ url: "https://www.example.com",
+ title: "example.com",
+ });
+ await PlacesUtils.bookmarks.insert({
+ guid: guid2,
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ url: "https://www.example.com",
+ title: "example.com",
+ });
+
+ await sync_engine_and_validate_telem(engine, false);
+ // Make sure we set hasDupe on outgoing records
+ Assert.ok(collection.payloads().every(payload => payload.hasDupe));
+
+ await PlacesUtils.bookmarks.remove(guid1);
+
+ await sync_engine_and_validate_telem(engine, false);
+
+ let tombstone = JSON.parse(
+ JSON.parse(collection.payload(guid1)).ciphertext
+ );
+ // We shouldn't set hasDupe on tombstones.
+ Assert.ok(tombstone.deleted);
+ Assert.ok(!tombstone.hasDupe);
+
+ let record = JSON.parse(JSON.parse(collection.payload(guid2)).ciphertext);
+ // We should set hasDupe on weakly uploaded records.
+ Assert.ok(!record.deleted);
+ Assert.ok(
+ record.hasDupe,
+ "Bookmarks bookmark engine should set hasDupe for weakly uploaded records."
+ );
+
+ await sync_engine_and_validate_telem(engine, false);
+ } finally {
+ await cleanup(engine, server);
+ await engine.finalize();
+ }
+});
+
+// Bug 890217.
+add_bookmark_test(async function test_sync_imap_URLs(engine) {
+ await Service.recordManager.clearCache();
+ await PlacesSyncUtils.bookmarks.reset();
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.user("foo").collection("bookmarks");
+
+ engine._tracker.start(); // We skip usual startup...
+
+ try {
+ collection.insert(
+ "menu",
+ encryptPayload({
+ id: "menu",
+ type: "folder",
+ parentid: "places",
+ title: "Bookmarks Menu",
+ children: ["bookmarkAAAA"],
+ })
+ );
+ collection.insert(
+ "bookmarkAAAA",
+ encryptPayload({
+ id: "bookmarkAAAA",
+ type: "bookmark",
+ parentid: "menu",
+ bmkUri:
+ "imap://vs@eleven.vs.solnicky.cz:993/fetch%3EUID%3E/" +
+ "INBOX%3E56291?part=1.2&type=image/jpeg&filename=" +
+ "invalidazPrahy.jpg",
+ title:
+ "invalidazPrahy.jpg (JPEG Image, 1280x1024 pixels) - Scaled (71%)",
+ })
+ );
+
+ await PlacesUtils.bookmarks.insert({
+ guid: "bookmarkBBBB",
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ url:
+ "imap://eleven.vs.solnicky.cz:993/fetch%3EUID%3E/" +
+ "CURRENT%3E2433?part=1.2&type=text/html&filename=TomEdwards.html",
+ title: "TomEdwards.html",
+ });
+
+ await sync_engine_and_validate_telem(engine, false);
+
+ let aInfo = await PlacesUtils.bookmarks.fetch("bookmarkAAAA");
+ equal(
+ aInfo.url.href,
+ "imap://vs@eleven.vs.solnicky.cz:993/" +
+ "fetch%3EUID%3E/INBOX%3E56291?part=1.2&type=image/jpeg&filename=" +
+ "invalidazPrahy.jpg",
+ "Remote bookmark A with IMAP URL should exist locally"
+ );
+
+ let bPayload = collection.cleartext("bookmarkBBBB");
+ equal(
+ bPayload.bmkUri,
+ "imap://eleven.vs.solnicky.cz:993/" +
+ "fetch%3EUID%3E/CURRENT%3E2433?part=1.2&type=text/html&filename=" +
+ "TomEdwards.html",
+ "Local bookmark B with IMAP URL should exist remotely"
+ );
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_resume_buffer() {
+ await Service.recordManager.clearCache();
+ let engine = new BookmarksEngine(Service);
+ await engine.initialize();
+ await engine._store.wipe();
+ await engine.resetClient();
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.user("foo").collection("bookmarks");
+
+ engine._tracker.start(); // We skip usual startup...
+
+ const batchChunkSize = 50;
+
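+  // The store applies incoming records in chunks of this size; below we
+  // poison the second chunk to simulate an interrupted sync.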
+ engine._store._batchChunkSize = batchChunkSize;
+ try {
+ let children = [];
+
+ let timestamp = round_timestamp(Date.now());
+ // Add two chunks worth of records to the server
+ for (let i = 0; i < batchChunkSize * 2; ++i) {
+ let cleartext = {
+ id: Utils.makeGUID(),
+ type: "bookmark",
+ parentid: "toolbar",
+ title: `Bookmark ${i}`,
+ parentName: "Bookmarks Toolbar",
+ bmkUri: `https://example.com/${i}`,
+ };
+ let wbo = collection.insert(
+ cleartext.id,
+ encryptPayload(cleartext),
+ timestamp + 10 * i
+ );
+ // Something that is effectively random, but deterministic.
+ // (This is just to ensure we don't accidentally start using the
+ // sortindex again).
+ wbo.sortindex = 1000 + Math.round(Math.sin(i / 5) * 100);
+ children.push(cleartext.id);
+ }
+
+ // Add the parent of those records, and ensure its timestamp is the most recent.
+ collection.insert(
+ "toolbar",
+ encryptPayload({
+ id: "toolbar",
+ type: "folder",
+ parentid: "places",
+ title: "Bookmarks Toolbar",
+ children,
+ }),
+ timestamp + 10 * children.length
+ );
+
+ // Replace applyIncomingBatch with a custom one that calls the original,
+ // but forces it to throw on the 2nd chunk.
+ let origApplyIncomingBatch = engine._store.applyIncomingBatch;
+ engine._store.applyIncomingBatch = function (records) {
+ if (records.length > batchChunkSize) {
+ // Hacky way to make reading from the batchChunkSize'th record throw.
+ delete records[batchChunkSize];
+ Object.defineProperty(records, batchChunkSize, {
+ get() {
+ throw new Error("D:");
+ },
+ });
+ }
+ return origApplyIncomingBatch.call(this, records);
+ };
+
+ let caughtError;
+ _("We expect this to fail");
+ try {
+ await sync_engine_and_validate_telem(engine, true);
+ } catch (e) {
+ caughtError = e;
+ }
+ Assert.ok(caughtError, "Expected engine.sync to throw");
+ Assert.equal(caughtError.message, "D:");
+
+ // The buffer subtracts one second from the actual timestamp.
+ let lastSync = (await engine.getLastSync()) + 1;
+ // We poisoned the batchChunkSize'th record, so the last successfully
+ // applied record will be batchChunkSize - 1.
+ let expectedLastSync = timestamp + 10 * (batchChunkSize - 1);
+ Assert.equal(expectedLastSync, lastSync);
+
+ engine._store.applyIncomingBatch = origApplyIncomingBatch;
+
+ await sync_engine_and_validate_telem(engine, false);
+
+ // Check that all the children made it onto the correct record.
+ let toolbarRecord = await engine._store.createRecord("toolbar");
+ Assert.deepEqual(toolbarRecord.children.sort(), children.sort());
+ } finally {
+ await cleanup(engine, server);
+ await engine.finalize();
+ }
+});
+
+add_bookmark_test(async function test_livemarks(engine) {
+ _("Ensure we replace new and existing livemarks with tombstones");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.user("foo").collection("bookmarks");
+ let now = Date.now();
+
+ try {
+ _("Insert existing livemark");
+ let modifiedForA = now - 5 * 60 * 1000;
+ await PlacesUtils.bookmarks.insert({
+ guid: "livemarkAAAA",
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ title: "A",
+ lastModified: new Date(modifiedForA),
+ dateAdded: new Date(modifiedForA),
+ source: PlacesUtils.bookmarks.SOURCE_SYNC,
+ });
+ collection.insert(
+ "menu",
+ encryptPayload({
+ id: "menu",
+ type: "folder",
+ parentName: "",
+ title: "menu",
+ children: ["livemarkAAAA"],
+ parentid: "places",
+ }),
+ round_timestamp(modifiedForA)
+ );
+ collection.insert(
+ "livemarkAAAA",
+ encryptPayload({
+ id: "livemarkAAAA",
+ type: "livemark",
+ feedUri: "http://example.com/a",
+ parentName: "menu",
+ title: "A",
+ parentid: "menu",
+ }),
+ round_timestamp(modifiedForA)
+ );
+
+ _("Insert remotely updated livemark");
+ await PlacesUtils.bookmarks.insert({
+ guid: "livemarkBBBB",
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ title: "B",
+ lastModified: new Date(now),
+ dateAdded: new Date(now),
+ });
+ collection.insert(
+ "toolbar",
+ encryptPayload({
+ id: "toolbar",
+ type: "folder",
+ parentName: "",
+ title: "toolbar",
+ children: ["livemarkBBBB"],
+ parentid: "places",
+ }),
+ round_timestamp(now)
+ );
+ collection.insert(
+ "livemarkBBBB",
+ encryptPayload({
+ id: "livemarkBBBB",
+ type: "livemark",
+ feedUri: "http://example.com/b",
+ parentName: "toolbar",
+ title: "B",
+ parentid: "toolbar",
+ }),
+ round_timestamp(now)
+ );
+
+ _("Insert new remote livemark");
+ collection.insert(
+ "unfiled",
+ encryptPayload({
+ id: "unfiled",
+ type: "folder",
+ parentName: "",
+ title: "unfiled",
+ children: ["livemarkCCCC"],
+ parentid: "places",
+ }),
+ round_timestamp(now)
+ );
+ collection.insert(
+ "livemarkCCCC",
+ encryptPayload({
+ id: "livemarkCCCC",
+ type: "livemark",
+ feedUri: "http://example.com/c",
+ parentName: "unfiled",
+ title: "C",
+ parentid: "unfiled",
+ }),
+ round_timestamp(now)
+ );
+
+ _("Bump last sync time to ignore A");
+ await engine.setLastSync(round_timestamp(now) - 60);
+
+ _("Sync");
+ await sync_engine_and_validate_telem(engine, false);
+
+ deepEqual(
+ collection.keys().sort(),
+ [
+ "livemarkAAAA",
+ "livemarkBBBB",
+ "livemarkCCCC",
+ "menu",
+ "mobile",
+ "toolbar",
+ "unfiled",
+ ],
+ "Should store original livemark A and tombstones for B and C on server"
+ );
+
+ let payloads = collection.payloads();
+
+ deepEqual(
+ payloads.find(payload => payload.id == "menu").children,
+ ["livemarkAAAA"],
+ "Should keep A in menu"
+ );
+ ok(
+ !payloads.find(payload => payload.id == "livemarkAAAA").deleted,
+ "Should not upload tombstone for A"
+ );
+
+ deepEqual(
+ payloads.find(payload => payload.id == "toolbar").children,
+ [],
+ "Should remove B from toolbar"
+ );
+ ok(
+ payloads.find(payload => payload.id == "livemarkBBBB").deleted,
+ "Should upload tombstone for B"
+ );
+
+ deepEqual(
+ payloads.find(payload => payload.id == "unfiled").children,
+ [],
+ "Should remove C from unfiled"
+ );
+ ok(
+ payloads.find(payload => payload.id == "livemarkCCCC").deleted,
+ "Should replace C with tombstone"
+ );
+
+ await assertBookmarksTreeMatches(
+ "",
+ [
+ {
+ guid: PlacesUtils.bookmarks.menuGuid,
+ index: 0,
+ children: [
+ {
+ guid: "livemarkAAAA",
+ index: 0,
+ },
+ ],
+ },
+ {
+ guid: PlacesUtils.bookmarks.toolbarGuid,
+ index: 1,
+ },
+ {
+ guid: PlacesUtils.bookmarks.unfiledGuid,
+ index: 3,
+ },
+ {
+ guid: PlacesUtils.bookmarks.mobileGuid,
+ index: 4,
+ },
+ ],
+ "Should keep A and remove B locally"
+ );
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_bookmark_test(async function test_unknown_fields(engine) {
+ let store = engine._store;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ let collection = server.user("foo").collection("bookmarks");
+ try {
+ let folder1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ title: "Folder 1",
+ });
+ let bmk1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder1.guid,
+ url: "http://getfirefox.com/",
+ title: "Get Firefox!",
+ });
+ let bmk2 = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder1.guid,
+ url: "http://getthunderbird.com/",
+ title: "Get Thunderbird!",
+ });
+ let toolbar_record = await store.createRecord("toolbar");
+ collection.insert("toolbar", encryptPayload(toolbar_record.cleartext));
+
+ let folder1_record_without_unknown_fields = await store.createRecord(
+ folder1.guid
+ );
+ collection.insert(
+ folder1.guid,
+ encryptPayload(folder1_record_without_unknown_fields.cleartext)
+ );
+
+ // First bookmark record has an unknown string field
+ let bmk1_record = await store.createRecord(bmk1.guid);
+ console.log("bmk1_record: ", bmk1_record);
+ bmk1_record.cleartext.unknownStrField =
+ "an unknown field from another client";
+ collection.insert(bmk1.guid, encryptPayload(bmk1_record.cleartext));
+
+    // Second bookmark record has an unknown object field.
+ let bmk2_record = await store.createRecord(bmk2.guid);
+ bmk2_record.cleartext.unknownObjField = {
+ name: "an unknown object from another client",
+ };
+ collection.insert(bmk2.guid, encryptPayload(bmk2_record.cleartext));
+
+ // Sync the two bookmarks
+ await sync_engine_and_validate_telem(engine, true);
+
+    // A folder could also have an unknown field.
+ let folder1_record = await store.createRecord(folder1.guid);
+ folder1_record.cleartext.unknownStrField =
+ "a folder could also have an unknown field!";
+ collection.insert(folder1.guid, encryptPayload(folder1_record.cleartext));
+
+    // Sync the new updates.
+ await engine.setLastSync(1);
+ await sync_engine_and_validate_telem(engine, true);
+
+ let payloads = collection.payloads();
+    // Validate the server has the unknown fields at the top level (and not
+    // nested under unknownFields).
+ let server_bmk1 = payloads.find(payload => payload.id == bmk1.guid);
+ deepEqual(
+ server_bmk1.unknownStrField,
+ "an unknown field from another client",
+ "unknown fields correctly on the record"
+ );
+ Assert.equal(server_bmk1.unknownFields, null);
+
+ // Check that the mirror table has unknown fields
+ let db = await PlacesUtils.promiseDBConnection();
+ let rows = await db.executeCached(
+ `
+ SELECT guid, title, unknownFields from items WHERE guid IN
+ (:bmk1, :bmk2, :folder1)`,
+ { bmk1: bmk1.guid, bmk2: bmk2.guid, folder1: folder1.guid }
+ );
+ // We should have 3 rows that came from the server
+ Assert.equal(rows.length, 3);
+
+ // Bookmark 1 - unknown string field
+ let remote_bmk1 = rows.find(
+ row => row.getResultByName("guid") == bmk1.guid
+ );
+ Assert.equal(remote_bmk1.getResultByName("title"), "Get Firefox!");
+ deepEqual(JSON.parse(remote_bmk1.getResultByName("unknownFields")), {
+ unknownStrField: "an unknown field from another client",
+ });
+
+ // Bookmark 2 - unknown object field
+ let remote_bmk2 = rows.find(
+ row => row.getResultByName("guid") == bmk2.guid
+ );
+ Assert.equal(remote_bmk2.getResultByName("title"), "Get Thunderbird!");
+ deepEqual(JSON.parse(remote_bmk2.getResultByName("unknownFields")), {
+ unknownObjField: {
+ name: "an unknown object from another client",
+ },
+ });
+
+ // Folder with unknown field
+
+ // check the server still has the unknown field
+ deepEqual(
+ payloads.find(payload => payload.id == folder1.guid).unknownStrField,
+ "a folder could also have an unknown field!",
+ "Server still has the unknown field"
+ );
+
+ let remote_folder = rows.find(
+ row => row.getResultByName("guid") == folder1.guid
+ );
+ Assert.equal(remote_folder.getResultByName("title"), "Folder 1");
+ deepEqual(JSON.parse(remote_folder.getResultByName("unknownFields")), {
+ unknownStrField: "a folder could also have an unknown field!",
+ });
+ } finally {
+ await cleanup(engine, server);
+ }
+});
diff --git a/services/sync/tests/unit/test_bookmark_order.js b/services/sync/tests/unit/test_bookmark_order.js
new file mode 100644
index 0000000000..fc182b81ef
--- /dev/null
+++ b/services/sync/tests/unit/test_bookmark_order.js
@@ -0,0 +1,586 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+_(
+ "Making sure after processing incoming bookmarks, they show up in the right order"
+);
+const { Bookmark, BookmarkFolder } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/bookmarks.sys.mjs"
+);
+const { Weave } = ChromeUtils.importESModule(
+ "resource://services-sync/main.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+async function serverForFoo(engine) {
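+  // Set up a server for user "foo" with fresh sync IDs for the clients and
+  // bookmarks engines, plus a fake default key bundle.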
+ await generateNewKeys(Service.collectionKeys);
+
+ let clientsEngine = Service.clientsEngine;
+ let clientsSyncID = await clientsEngine.resetLocalSyncID();
+ let engineSyncID = await engine.resetLocalSyncID();
+ return serverForUsers(
+ { foo: "password" },
+ {
+ meta: {
+ global: {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ engines: {
+ clients: {
+ version: clientsEngine.version,
+ syncID: clientsSyncID,
+ },
+ [engine.name]: {
+ version: engine.version,
+ syncID: engineSyncID,
+ },
+ },
+ },
+ },
+ crypto: {
+ keys: encryptPayload({
+ id: "keys",
+ // Generate a fake default key bundle to avoid resetting the client
+ // before the first sync.
+ default: [
+ await Weave.Crypto.generateRandomKey(),
+ await Weave.Crypto.generateRandomKey(),
+ ],
+ }),
+ },
+ [engine.name]: {},
+ }
+ );
+}
+
+async function resolveConflict(
+ engine,
+ collection,
+ timestamp,
+ buildTree,
+ message
+) {
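+  // Build a local tree and a conflicting remote tree over the same folders,
+  // sync, then check the merged result against the tree from buildTree(guids).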
+ let guids = {
+ // These items don't exist on the server.
+ fx: Utils.makeGUID(),
+ nightly: Utils.makeGUID(),
+ support: Utils.makeGUID(),
+ customize: Utils.makeGUID(),
+
+ // These exist on the server, but in a different order, and `res`
+ // has completely different children.
+ res: Utils.makeGUID(),
+ tb: Utils.makeGUID(),
+
+ // These don't exist locally.
+ bz: Utils.makeGUID(),
+ irc: Utils.makeGUID(),
+ mdn: Utils.makeGUID(),
+ };
+
+ await PlacesUtils.bookmarks.insertTree({
+ guid: PlacesUtils.bookmarks.menuGuid,
+ children: [
+ {
+ guid: guids.fx,
+ title: "Get Firefox!",
+ url: "http://getfirefox.com/",
+ },
+ {
+ guid: guids.res,
+ title: "Resources",
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ children: [
+ {
+ guid: guids.nightly,
+ title: "Nightly",
+ url: "https://nightly.mozilla.org/",
+ },
+ {
+ guid: guids.support,
+ title: "Support",
+ url: "https://support.mozilla.org/",
+ },
+ {
+ guid: guids.customize,
+ title: "Customize",
+ url: "https://mozilla.org/firefox/customize/",
+ },
+ ],
+ },
+ {
+ title: "Get Thunderbird!",
+ guid: guids.tb,
+ url: "http://getthunderbird.com/",
+ },
+ ],
+ });
+
+ let serverRecords = [
+ {
+ id: "menu",
+ type: "folder",
+ title: "Bookmarks Menu",
+ parentid: "places",
+ children: [guids.tb, guids.res],
+ },
+ {
+ id: guids.tb,
+ type: "bookmark",
+ parentid: "menu",
+ bmkUri: "http://getthunderbird.com/",
+ title: "Get Thunderbird!",
+ },
+ {
+ id: guids.res,
+ type: "folder",
+ parentid: "menu",
+ title: "Resources",
+ children: [guids.irc, guids.bz, guids.mdn],
+ },
+ {
+ id: guids.bz,
+ type: "bookmark",
+ parentid: guids.res,
+ bmkUri: "https://bugzilla.mozilla.org/",
+ title: "Bugzilla",
+ },
+ {
+ id: guids.mdn,
+ type: "bookmark",
+ parentid: guids.res,
+ bmkUri: "https://developer.mozilla.org/",
+ title: "MDN",
+ },
+ {
+ id: guids.irc,
+ type: "bookmark",
+ parentid: guids.res,
+ bmkUri: "ircs://irc.mozilla.org/nightly",
+ title: "IRC",
+ },
+ ];
+ for (let record of serverRecords) {
+ collection.insert(record.id, encryptPayload(record), timestamp);
+ }
+
+ engine.lastModified = collection.timestamp;
+ await sync_engine_and_validate_telem(engine, false);
+
+ let expectedTree = buildTree(guids);
+ await assertBookmarksTreeMatches(
+ PlacesUtils.bookmarks.menuGuid,
+ expectedTree,
+ message
+ );
+}
+
+async function get_engine() {
+ return Service.engineManager.get("bookmarks");
+}
+
+add_task(async function test_local_order_newer() {
+ let engine = await get_engine();
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ try {
+ let collection = server.user("foo").collection("bookmarks");
+ let serverModified = Date.now() / 1000 - 120;
+ await resolveConflict(
+ engine,
+ collection,
+ serverModified,
+ guids => [
+ {
+ guid: guids.fx,
+ index: 0,
+ },
+ {
+ guid: guids.res,
+ index: 1,
+ children: [
+ {
+ guid: guids.nightly,
+ index: 0,
+ },
+ {
+ guid: guids.support,
+ index: 1,
+ },
+ {
+ guid: guids.customize,
+ index: 2,
+ },
+ {
+ guid: guids.irc,
+ index: 3,
+ },
+ {
+ guid: guids.bz,
+ index: 4,
+ },
+ {
+ guid: guids.mdn,
+ index: 5,
+ },
+ ],
+ },
+ {
+ guid: guids.tb,
+ index: 2,
+ },
+ ],
+ "Should use local order as base if remote is older"
+ );
+ } finally {
+ await engine.wipeClient();
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_remote_order_newer() {
+ let engine = await get_engine();
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ try {
+ let collection = server.user("foo").collection("bookmarks");
+ let serverModified = Date.now() / 1000 + 120;
+ await resolveConflict(
+ engine,
+ collection,
+ serverModified,
+ guids => [
+ {
+ guid: guids.tb,
+ index: 0,
+ },
+ {
+ guid: guids.res,
+ index: 1,
+ children: [
+ {
+ guid: guids.irc,
+ index: 0,
+ },
+ {
+ guid: guids.bz,
+ index: 1,
+ },
+ {
+ guid: guids.mdn,
+ index: 2,
+ },
+ {
+ guid: guids.nightly,
+ index: 3,
+ },
+ {
+ guid: guids.support,
+ index: 4,
+ },
+ {
+ guid: guids.customize,
+ index: 5,
+ },
+ ],
+ },
+ {
+ guid: guids.fx,
+ index: 2,
+ },
+ ],
+ "Should use remote order as base if local is older"
+ );
+ } finally {
+ await engine.wipeClient();
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_bookmark_order() {
+ let engine = await get_engine();
+ let store = engine._store;
+ _("Starting with a clean slate of no bookmarks");
+ await store.wipe();
+ await assertBookmarksTreeMatches(
+ "",
+ [
+ {
+ guid: PlacesUtils.bookmarks.menuGuid,
+ index: 0,
+ },
+ {
+ guid: PlacesUtils.bookmarks.toolbarGuid,
+ index: 1,
+ },
+ {
+ // Index 2 is the tags root. (Root indices depend on the order of the
+ // `CreateRoot` calls in `Database::CreateBookmarkRoots`).
+ guid: PlacesUtils.bookmarks.unfiledGuid,
+ index: 3,
+ },
+ {
+ guid: PlacesUtils.bookmarks.mobileGuid,
+ index: 4,
+ },
+ ],
+ "clean slate"
+ );
+
+ function bookmark(name, parent) {
+ let bm = new Bookmark("http://weave.server/my-bookmark");
+ bm.id = name;
+ bm.title = name;
+ bm.bmkUri = "http://uri/";
+ bm.parentid = parent || "unfiled";
+ bm.tags = [];
+ return bm;
+ }
+
+ function folder(name, parent, children) {
+ let bmFolder = new BookmarkFolder("http://weave.server/my-bookmark-folder");
+ bmFolder.id = name;
+ bmFolder.title = name;
+ bmFolder.parentid = parent || "unfiled";
+ bmFolder.children = children;
+ return bmFolder;
+ }
+
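+  // Stages each incoming record in the store, then merges the staged records
+  // into Places.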
+ async function apply(records) {
+    for (let record of records) {
+ await store.applyIncoming(record);
+ }
+ await engine._apply();
+ }
+ let id10 = "10_aaaaaaaaa";
+ _("basic add first bookmark");
+ await apply([bookmark(id10, "")]);
+ await assertBookmarksTreeMatches(
+ "",
+ [
+ {
+ guid: PlacesUtils.bookmarks.menuGuid,
+ index: 0,
+ },
+ {
+ guid: PlacesUtils.bookmarks.toolbarGuid,
+ index: 1,
+ },
+ {
+ guid: PlacesUtils.bookmarks.unfiledGuid,
+ index: 3,
+ children: [
+ {
+ guid: id10,
+ index: 0,
+ },
+ ],
+ },
+ {
+ guid: PlacesUtils.bookmarks.mobileGuid,
+ index: 4,
+ },
+ ],
+ "basic add first bookmark"
+ );
+ let id20 = "20_aaaaaaaaa";
+ _("basic append behind 10");
+ await apply([bookmark(id20, "")]);
+ await assertBookmarksTreeMatches(
+ "",
+ [
+ {
+ guid: PlacesUtils.bookmarks.menuGuid,
+ index: 0,
+ },
+ {
+ guid: PlacesUtils.bookmarks.toolbarGuid,
+ index: 1,
+ },
+ {
+ guid: PlacesUtils.bookmarks.unfiledGuid,
+ index: 3,
+ children: [
+ {
+ guid: id10,
+ index: 0,
+ },
+ {
+ guid: id20,
+ index: 1,
+ },
+ ],
+ },
+ {
+ guid: PlacesUtils.bookmarks.mobileGuid,
+ index: 4,
+ },
+ ],
+ "basic append behind 10"
+ );
+
+ let id31 = "31_aaaaaaaaa";
+ let id30 = "f30_aaaaaaaa";
+ _("basic create in folder");
+ let b31 = bookmark(id31, id30);
+ let f30 = folder(id30, "", [id31]);
+ await apply([b31, f30]);
+ await assertBookmarksTreeMatches(
+ "",
+ [
+ {
+ guid: PlacesUtils.bookmarks.menuGuid,
+ index: 0,
+ },
+ {
+ guid: PlacesUtils.bookmarks.toolbarGuid,
+ index: 1,
+ },
+ {
+ guid: PlacesUtils.bookmarks.unfiledGuid,
+ index: 3,
+ children: [
+ {
+ guid: id10,
+ index: 0,
+ },
+ {
+ guid: id20,
+ index: 1,
+ },
+ {
+ guid: id30,
+ index: 2,
+ children: [
+ {
+ guid: id31,
+ index: 0,
+ },
+ ],
+ },
+ ],
+ },
+ {
+ guid: PlacesUtils.bookmarks.mobileGuid,
+ index: 4,
+ },
+ ],
+ "basic create in folder"
+ );
+
+ let id41 = "41_aaaaaaaaa";
+ let id40 = "f40_aaaaaaaa";
+ _("insert missing parent -> append to unfiled");
+ await apply([bookmark(id41, id40)]);
+ await assertBookmarksTreeMatches(
+ "",
+ [
+ {
+ guid: PlacesUtils.bookmarks.menuGuid,
+ index: 0,
+ },
+ {
+ guid: PlacesUtils.bookmarks.toolbarGuid,
+ index: 1,
+ },
+ {
+ guid: PlacesUtils.bookmarks.unfiledGuid,
+ index: 3,
+ children: [
+ {
+ guid: id10,
+ index: 0,
+ },
+ {
+ guid: id20,
+ index: 1,
+ },
+ {
+ guid: id30,
+ index: 2,
+ children: [
+ {
+ guid: id31,
+ index: 0,
+ },
+ ],
+ },
+ {
+ guid: id41,
+ index: 3,
+ },
+ ],
+ },
+ {
+ guid: PlacesUtils.bookmarks.mobileGuid,
+ index: 4,
+ },
+ ],
+ "insert missing parent -> append to unfiled"
+ );
+
+ let id42 = "42_aaaaaaaaa";
+
+ _("insert another missing parent -> append");
+ await apply([bookmark(id42, id40)]);
+ await assertBookmarksTreeMatches(
+ "",
+ [
+ {
+ guid: PlacesUtils.bookmarks.menuGuid,
+ index: 0,
+ },
+ {
+ guid: PlacesUtils.bookmarks.toolbarGuid,
+ index: 1,
+ },
+ {
+ guid: PlacesUtils.bookmarks.unfiledGuid,
+ index: 3,
+ children: [
+ {
+ guid: id10,
+ index: 0,
+ },
+ {
+ guid: id20,
+ index: 1,
+ },
+ {
+ guid: id30,
+ index: 2,
+ children: [
+ {
+ guid: id31,
+ index: 0,
+ },
+ ],
+ },
+ {
+ guid: id41,
+ index: 3,
+ },
+ {
+ guid: id42,
+ index: 4,
+ },
+ ],
+ },
+ {
+ guid: PlacesUtils.bookmarks.mobileGuid,
+ index: 4,
+ },
+ ],
+ "insert another missing parent -> append"
+ );
+
+ await engine.wipeClient();
+ await Service.startOver();
+ await engine.finalize();
+});
diff --git a/services/sync/tests/unit/test_bookmark_places_query_rewriting.js b/services/sync/tests/unit/test_bookmark_places_query_rewriting.js
new file mode 100644
index 0000000000..e8dbbb48b1
--- /dev/null
+++ b/services/sync/tests/unit/test_bookmark_places_query_rewriting.js
@@ -0,0 +1,57 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+_("Rewrite place: URIs.");
+const { BookmarkQuery, BookmarkFolder } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/bookmarks.sys.mjs"
+);
+// `Service` is used as a global in head_helpers.js.
+// eslint-disable-next-line no-unused-vars
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
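+// Builds a tag-contents query record; the store should rewrite its
+// folder-based place: URI into one that queries by tag name
+// (place:tag=<folderName>).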
+function makeTagRecord(id, uri) {
+ let tagRecord = new BookmarkQuery("bookmarks", id);
+ tagRecord.queryId = "MagicTags";
+ tagRecord.parentName = "Bookmarks Toolbar";
+ tagRecord.bmkUri = uri;
+ tagRecord.title = "tagtag";
+ tagRecord.folderName = "bar";
+ tagRecord.parentid = PlacesUtils.bookmarks.toolbarGuid;
+ return tagRecord;
+}
+
+add_bookmark_test(async function run_test(engine) {
+ let store = engine._store;
+
+ let toolbar = new BookmarkFolder("bookmarks", "toolbar");
+ toolbar.parentid = "places";
+ toolbar.children = ["abcdefabcdef"];
+
+ let uri = "place:folder=499&type=7&queryType=1";
+ let tagRecord = makeTagRecord("abcdefabcdef", uri);
+
+ _("Type: " + tagRecord.type);
+ _("Folder name: " + tagRecord.folderName);
+ await store.applyIncoming(toolbar);
+ await store.applyIncoming(tagRecord);
+ await engine._apply();
+
+ let insertedRecord = await store.createRecord("abcdefabcdef", "bookmarks");
+ Assert.equal(insertedRecord.bmkUri, "place:tag=bar");
+
+ _("... but not if the type is wrong.");
+ let wrongTypeURI = "place:folder=499&type=2&queryType=1";
+ let wrongTypeRecord = makeTagRecord("fedcbafedcba", wrongTypeURI);
+ await store.applyIncoming(wrongTypeRecord);
+ toolbar.children = ["fedcbafedcba"];
+ await store.applyIncoming(toolbar);
+  await engine._apply();
+
+  // The mirror appends a special param to these.
+  let expected = wrongTypeURI + "&excludeItems=1";
+
+ insertedRecord = await store.createRecord("fedcbafedcba", "bookmarks");
+ Assert.equal(insertedRecord.bmkUri, expected);
+});
diff --git a/services/sync/tests/unit/test_bookmark_record.js b/services/sync/tests/unit/test_bookmark_record.js
new file mode 100644
index 0000000000..c261027ed9
--- /dev/null
+++ b/services/sync/tests/unit/test_bookmark_record.js
@@ -0,0 +1,64 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Bookmark, BookmarkQuery, PlacesItem } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/bookmarks.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
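+// Creates a bookmark record with a marker in its cleartext so the test can
+// verify the payload survives an encrypt/decrypt round trip.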
+function prepareBookmarkItem(collection, id) {
+ let b = new Bookmark(collection, id);
+ b.cleartext.stuff = "my payload here";
+ return b;
+}
+
+add_task(async function test_bookmark_record() {
+ await configureIdentity();
+
+ await generateNewKeys(Service.collectionKeys);
+ let keyBundle = Service.identity.syncKeyBundle;
+
+ _("Creating a record");
+
+ let placesItem = new PlacesItem("bookmarks", "foo", "bookmark");
+ let bookmarkItem = prepareBookmarkItem("bookmarks", "foo");
+
+ _("Checking getTypeObject");
+ Assert.equal(placesItem.getTypeObject(placesItem.type), Bookmark);
+ Assert.equal(bookmarkItem.getTypeObject(bookmarkItem.type), Bookmark);
+
+ await bookmarkItem.encrypt(keyBundle);
+ _("Ciphertext is " + bookmarkItem.ciphertext);
+ Assert.ok(bookmarkItem.ciphertext != null);
+
+ _("Decrypting the record");
+
+ let payload = await bookmarkItem.decrypt(keyBundle);
+ Assert.equal(payload.stuff, "my payload here");
+ Assert.equal(bookmarkItem.getTypeObject(bookmarkItem.type), Bookmark);
+ Assert.notEqual(payload, bookmarkItem.payload); // wrap.data.payload is the encrypted one
+});
+
+add_task(async function test_query_foldername() {
+ // Bug 1443388
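+  // An empty folder name should round-trip as undefined, not as "".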
+ let checks = [
+ ["foo", "foo"],
+ ["", undefined],
+ ];
+ for (let [inVal, outVal] of checks) {
+ let bmk1 = new BookmarkQuery("bookmarks", Utils.makeGUID());
+ bmk1.fromSyncBookmark({
+ url: Services.io.newURI("https://example.com"),
+ folder: inVal,
+ });
+ Assert.strictEqual(bmk1.folderName, outVal);
+
+ // other direction
+ let bmk2 = new BookmarkQuery("bookmarks", Utils.makeGUID());
+ bmk2.folderName = inVal;
+ let record = bmk2.toSyncBookmark();
+ Assert.strictEqual(record.folder, outVal);
+ }
+});
diff --git a/services/sync/tests/unit/test_bookmark_store.js b/services/sync/tests/unit/test_bookmark_store.js
new file mode 100644
index 0000000000..2f4330ed2e
--- /dev/null
+++ b/services/sync/tests/unit/test_bookmark_store.js
@@ -0,0 +1,425 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Bookmark, BookmarkFolder, BookmarkQuery, PlacesItem } =
+ ChromeUtils.importESModule(
+ "resource://services-sync/engines/bookmarks.sys.mjs"
+ );
+// `Service` is used as a global in head_helpers.js.
+// eslint-disable-next-line no-unused-vars
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+const BookmarksToolbarTitle = "toolbar";
+
+// apply some test records without going via a test server.
+async function apply_records(engine, records) {
+  for (let record of records) {
+ await engine._store.applyIncoming(record);
+ }
+ await engine._apply();
+}
+
+add_bookmark_test(async function test_ignore_specials(engine) {
+ _("Ensure that we can't delete bookmark roots.");
+ let store = engine._store;
+
+ // Belt...
+ let record = new BookmarkFolder("bookmarks", "toolbar", "folder");
+ record.deleted = true;
+ Assert.notEqual(
+ null,
+ await PlacesTestUtils.promiseItemId(PlacesUtils.bookmarks.toolbarGuid)
+ );
+
+ await apply_records(engine, [record]);
+
+ // Ensure that the toolbar exists.
+ Assert.notEqual(
+ null,
+ await PlacesTestUtils.promiseItemId(PlacesUtils.bookmarks.toolbarGuid)
+ );
+
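+  // ...and braces: applying the same deletion again must also be a no-op.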
+ await apply_records(engine, [record]);
+
+ Assert.notEqual(
+ null,
+ await PlacesTestUtils.promiseItemId(PlacesUtils.bookmarks.toolbarGuid)
+ );
+ await store.wipe();
+});
+
+add_bookmark_test(async function test_bookmark_create(engine) {
+ let store = engine._store;
+
+ try {
+ _("Ensure the record isn't present yet.");
+ let item = await PlacesUtils.bookmarks.fetch({
+ url: "http://getfirefox.com/",
+ });
+ Assert.equal(null, item);
+
+ _("Let's create a new record.");
+ let fxrecord = new Bookmark("bookmarks", "get-firefox1");
+ fxrecord.bmkUri = "http://getfirefox.com/";
+ fxrecord.title = "Get Firefox!";
+ fxrecord.tags = ["firefox", "awesome", "browser"];
+ fxrecord.keyword = "awesome";
+ fxrecord.parentName = BookmarksToolbarTitle;
+ fxrecord.parentid = "toolbar";
+ await apply_records(engine, [fxrecord]);
+
+ _("Verify it has been created correctly.");
+ item = await PlacesUtils.bookmarks.fetch(fxrecord.id);
+ Assert.equal(item.type, PlacesUtils.bookmarks.TYPE_BOOKMARK);
+ Assert.equal(item.url.href, "http://getfirefox.com/");
+ Assert.equal(item.title, fxrecord.title);
+ Assert.equal(item.parentGuid, PlacesUtils.bookmarks.toolbarGuid);
+ let keyword = await PlacesUtils.keywords.fetch(fxrecord.keyword);
+ Assert.equal(keyword.url.href, "http://getfirefox.com/");
+
+ _(
+ "Have the store create a new record object. Verify that it has the same data."
+ );
+ let newrecord = await store.createRecord(fxrecord.id);
+ Assert.ok(newrecord instanceof Bookmark);
+ for (let property of [
+ "type",
+ "bmkUri",
+ "title",
+ "keyword",
+ "parentName",
+ "parentid",
+ ]) {
+ Assert.equal(newrecord[property], fxrecord[property]);
+ }
+ Assert.ok(Utils.deepEquals(newrecord.tags.sort(), fxrecord.tags.sort()));
+
+ _("The calculated sort index is based on frecency data.");
+ Assert.ok(newrecord.sortindex >= 150);
+
+ _("Create a record with some values missing.");
+ let tbrecord = new Bookmark("bookmarks", "thunderbird1");
+ tbrecord.bmkUri = "http://getthunderbird.com/";
+ tbrecord.parentName = BookmarksToolbarTitle;
+ tbrecord.parentid = "toolbar";
+ await apply_records(engine, [tbrecord]);
+
+ _("Verify it has been created correctly.");
+ item = await PlacesUtils.bookmarks.fetch(tbrecord.id);
+ Assert.equal(item.type, PlacesUtils.bookmarks.TYPE_BOOKMARK);
+ Assert.equal(item.url.href, "http://getthunderbird.com/");
+ Assert.equal(item.title, "");
+ Assert.equal(item.parentGuid, PlacesUtils.bookmarks.toolbarGuid);
+ keyword = await PlacesUtils.keywords.fetch({
+ url: "http://getthunderbird.com/",
+ });
+ Assert.equal(null, keyword);
+ } finally {
+ _("Clean up.");
+ await store.wipe();
+ }
+});
+
+add_bookmark_test(async function test_bookmark_update(engine) {
+ let store = engine._store;
+
+ try {
+ _("Create a bookmark whose values we'll change.");
+ let bmk1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ url: "http://getfirefox.com/",
+ title: "Get Firefox!",
+ });
+ await PlacesUtils.keywords.insert({
+ url: "http://getfirefox.com/",
+ keyword: "firefox",
+ });
+
+ _("Update the record with some null values.");
+ let record = await store.createRecord(bmk1.guid);
+ record.title = null;
+ record.keyword = null;
+ record.tags = null;
+ await apply_records(engine, [record]);
+
+ _("Verify that the values have been cleared.");
+ let item = await PlacesUtils.bookmarks.fetch(bmk1.guid);
+ Assert.equal(item.title, "");
+ let keyword = await PlacesUtils.keywords.fetch({
+ url: "http://getfirefox.com/",
+ });
+ Assert.equal(null, keyword);
+ } finally {
+ _("Clean up.");
+ await store.wipe();
+ }
+});
+
+add_bookmark_test(async function test_bookmark_createRecord(engine) {
+ let store = engine._store;
+
+ try {
+ _("Create a bookmark without a title.");
+ let bmk1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ url: "http://getfirefox.com/",
+ });
+
+ _("Verify that the record is created accordingly.");
+ let record = await store.createRecord(bmk1.guid);
+ Assert.equal(record.title, "");
+ Assert.equal(record.keyword, null);
+ } finally {
+ _("Clean up.");
+ await store.wipe();
+ }
+});
+
+add_bookmark_test(async function test_folder_create(engine) {
+ let store = engine._store;
+
+ try {
+ _("Create a folder.");
+ let folder = new BookmarkFolder("bookmarks", "testfolder-1");
+ folder.parentName = BookmarksToolbarTitle;
+ folder.parentid = "toolbar";
+ folder.title = "Test Folder";
+ await apply_records(engine, [folder]);
+
+ _("Verify it has been created correctly.");
+ let item = await PlacesUtils.bookmarks.fetch(folder.id);
+ Assert.equal(item.type, PlacesUtils.bookmarks.TYPE_FOLDER);
+ Assert.equal(item.title, folder.title);
+ Assert.equal(item.parentGuid, PlacesUtils.bookmarks.toolbarGuid);
+
+ _(
+ "Have the store create a new record object. Verify that it has the same data."
+ );
+ let newrecord = await store.createRecord(folder.id);
+ Assert.ok(newrecord instanceof BookmarkFolder);
+ for (let property of ["title", "parentName", "parentid"]) {
+ Assert.equal(newrecord[property], folder[property]);
+ }
+
+ _("Folders have high sort index to ensure they're synced first.");
+ Assert.equal(newrecord.sortindex, 1000000);
+ } finally {
+ _("Clean up.");
+ await store.wipe();
+ }
+});
+
+add_bookmark_test(async function test_folder_createRecord(engine) {
+ let store = engine._store;
+
+ try {
+ _("Create a folder.");
+ let folder1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ title: "Folder1",
+ });
+
+ _("Create two bookmarks in that folder without assigning them GUIDs.");
+ let bmk1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder1.guid,
+ url: "http://getfirefox.com/",
+ title: "Get Firefox!",
+ });
+ let bmk2 = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder1.guid,
+ url: "http://getthunderbird.com/",
+ title: "Get Thunderbird!",
+ });
+
+ _("Create a record for the folder and verify basic properties.");
+ let record = await store.createRecord(folder1.guid);
+ Assert.ok(record instanceof BookmarkFolder);
+ Assert.equal(record.title, "Folder1");
+ Assert.equal(record.parentid, "toolbar");
+ Assert.equal(record.parentName, BookmarksToolbarTitle);
+
+ _(
+ "Verify the folder's children. Ensures that the bookmarks were given GUIDs."
+ );
+ Assert.deepEqual(record.children, [bmk1.guid, bmk2.guid]);
+ } finally {
+ _("Clean up.");
+ await store.wipe();
+ }
+});
+
+add_bookmark_test(async function test_deleted(engine) {
+ let store = engine._store;
+
+ try {
+ _("Create a bookmark that will be deleted.");
+ let bmk1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ url: "http://getfirefox.com/",
+ title: "Get Firefox!",
+ });
+ // The engine needs to think we've previously synced it.
+ await PlacesTestUtils.markBookmarksAsSynced();
+
+ _("Delete the bookmark through the store.");
+ let record = new PlacesItem("bookmarks", bmk1.guid);
+ record.deleted = true;
+ await apply_records(engine, [record]);
+ _("Ensure it has been deleted.");
+ let item = await PlacesUtils.bookmarks.fetch(bmk1.guid);
+ let newrec = await store.createRecord(bmk1.guid);
+ Assert.equal(null, item);
+ Assert.equal(newrec.deleted, true);
+ _("Verify that the keyword has been cleared.");
+ let keyword = await PlacesUtils.keywords.fetch({
+ url: "http://getfirefox.com/",
+ });
+ Assert.equal(null, keyword);
+ } finally {
+ _("Clean up.");
+ await store.wipe();
+ }
+});
+
+add_bookmark_test(async function test_move_folder(engine) {
+ let store = engine._store;
+ store._childrenToOrder = {}; // *sob* - only needed for legacy.
+
+ try {
+ _("Create two folders and a bookmark in one of them.");
+ let folder1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ title: "Folder1",
+ });
+ let folder2 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ title: "Folder2",
+ });
+ let bmk = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder1.guid,
+ url: "http://getfirefox.com/",
+ title: "Get Firefox!",
+ });
+ // add records to the store that represent the current state.
+ await apply_records(engine, [
+ await store.createRecord(folder1.guid),
+ await store.createRecord(folder2.guid),
+ await store.createRecord(bmk.guid),
+ ]);
+
+ _("Now simulate incoming records reparenting it.");
+ let bmkRecord = await store.createRecord(bmk.guid);
+ Assert.equal(bmkRecord.parentid, folder1.guid);
+ bmkRecord.parentid = folder2.guid;
+
+ let folder1Record = await store.createRecord(folder1.guid);
+ Assert.deepEqual(folder1Record.children, [bmk.guid]);
+ folder1Record.children = [];
+ let folder2Record = await store.createRecord(folder2.guid);
+ Assert.deepEqual(folder2Record.children, []);
+ folder2Record.children = [bmk.guid];
+
+ await apply_records(engine, [bmkRecord, folder1Record, folder2Record]);
+
+ _("Verify the new parent.");
+ let movedBmk = await PlacesUtils.bookmarks.fetch(bmk.guid);
+ Assert.equal(movedBmk.parentGuid, folder2.guid);
+ } finally {
+ _("Clean up.");
+ await store.wipe();
+ }
+});
+
+add_bookmark_test(async function test_move_order(engine) {
+ let store = engine._store;
+ let tracker = engine._tracker;
+
+ // Make sure the tracker is turned on.
+ tracker.start();
+ try {
+ _("Create two bookmarks");
+ let bmk1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ url: "http://getfirefox.com/",
+ title: "Get Firefox!",
+ });
+ let bmk2 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ url: "http://getthunderbird.com/",
+ title: "Get Thunderbird!",
+ });
+
+ _("Verify order.");
+ let childIds = await PlacesSyncUtils.bookmarks.fetchChildRecordIds(
+ "toolbar"
+ );
+ Assert.deepEqual(childIds, [bmk1.guid, bmk2.guid]);
+ let toolbar = await store.createRecord("toolbar");
+ Assert.deepEqual(toolbar.children, [bmk1.guid, bmk2.guid]);
+
+ _("Move bookmarks around.");
+ store._childrenToOrder = {};
+ toolbar.children = [bmk2.guid, bmk1.guid];
+ await apply_records(engine, [
+ toolbar,
+ await store.createRecord(bmk1.guid),
+ await store.createRecord(bmk2.guid),
+ ]);
+ delete store._childrenToOrder;
+
+ _("Verify new order.");
+ let newChildIds = await PlacesSyncUtils.bookmarks.fetchChildRecordIds(
+ "toolbar"
+ );
+ Assert.deepEqual(newChildIds, [bmk2.guid, bmk1.guid]);
+ } finally {
+ await tracker.stop();
+ _("Clean up.");
+ await store.wipe();
+ }
+});
+
+// Tests Bug 806460, in which query records arrive with empty folder
+// names and missing bookmark URIs.
+add_bookmark_test(async function test_empty_query_doesnt_die(engine) {
+ let record = new BookmarkQuery("bookmarks", "8xoDGqKrXf1P");
+ record.folderName = "";
+ record.queryId = "";
+ record.parentName = "Toolbar";
+ record.parentid = "toolbar";
+
+ // These should not throw.
+ await apply_records(engine, [record]);
+
+ delete record.folderName;
+ await apply_records(engine, [record]);
+});
+
+add_bookmark_test(async function test_calculateIndex_for_invalid_url(engine) {
+ let store = engine._store;
+
+ let folderIndex = await store._calculateIndex({
+ type: "folder",
+ });
+ equal(folderIndex, 1000000, "Should use high sort index for folders");
+
+ let toolbarIndex = await store._calculateIndex({
+ parentid: "toolbar",
+ });
+ equal(toolbarIndex, 150, "Should bump sort index for toolbar bookmarks");
+
+ let validURLIndex = await store._calculateIndex({
+ bmkUri: "http://example.com/a",
+ });
+ greaterOrEqual(validURLIndex, 0, "Should use frecency for index");
+
+ let invalidURLIndex = await store._calculateIndex({
+ bmkUri: "!@#$%",
+ });
+ equal(invalidURLIndex, 0, "Should not throw for invalid URLs");
+});
diff --git a/services/sync/tests/unit/test_bookmark_tracker.js b/services/sync/tests/unit/test_bookmark_tracker.js
new file mode 100644
index 0000000000..9cfbb4de78
--- /dev/null
+++ b/services/sync/tests/unit/test_bookmark_tracker.js
@@ -0,0 +1,1275 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { PlacesTransactions } = ChromeUtils.importESModule(
+ "resource://gre/modules/PlacesTransactions.sys.mjs"
+);
+
+let engine;
+let store;
+let tracker;
+
+const DAY_IN_MS = 24 * 60 * 60 * 1000;
+
+add_task(async function setup() {
+ await Service.engineManager.switchAlternatives();
+ engine = Service.engineManager.get("bookmarks");
+ store = engine._store;
+ tracker = engine._tracker;
+});
+
+// Test helpers.
+async function verifyTrackerEmpty() {
+ await PlacesTestUtils.promiseAsyncUpdates();
+ let changes = await tracker.getChangedIDs();
+ deepEqual(changes, {});
+ equal(tracker.score, 0);
+}
+
+async function resetTracker() {
+ await PlacesTestUtils.markBookmarksAsSynced();
+ tracker.resetScore();
+}
+
+async function cleanup() {
+ await engine.setLastSync(0);
+ await store.wipe();
+ await resetTracker();
+ await tracker.stop();
+}
+
+// startTracking is a signal that the test wants to notice things that happen
+// after this is called (i.e., things already tracked should be discarded).
+async function startTracking() {
+ engine._tracker.start();
+ await PlacesTestUtils.markBookmarksAsSynced();
+}
+
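+// Asserts that exactly the given GUIDs (and no others) are tracked, and that
+// each has a sane modified time and change counter.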
+async function verifyTrackedItems(tracked) {
+ await PlacesTestUtils.promiseAsyncUpdates();
+ let changedIDs = await tracker.getChangedIDs();
+ let trackedIDs = new Set(Object.keys(changedIDs));
+ for (let guid of tracked) {
+ ok(guid in changedIDs, `${guid} should be tracked`);
+ ok(changedIDs[guid].modified > 0, `${guid} should have a modified time`);
+ ok(changedIDs[guid].counter >= -1, `${guid} should have a change counter`);
+ trackedIDs.delete(guid);
+ }
+ equal(
+ trackedIDs.size,
+ 0,
+ `Unhandled tracked IDs: ${JSON.stringify(Array.from(trackedIDs))}`
+ );
+}
+
+async function verifyTrackedCount(expected) {
+ await PlacesTestUtils.promiseAsyncUpdates();
+ let changedIDs = await tracker.getChangedIDs();
+ do_check_attribute_count(changedIDs, expected);
+}
+
+// A debugging helper that dumps the full bookmarks tree.
+// Currently unused, but might come in handy.
+// eslint-disable-next-line no-unused-vars
+async function dumpBookmarks() {
+  let columns = [
+    "id",
+    "title",
+    "guid",
+    "syncStatus",
+    "syncChangeCounter",
+    "position",
+  ];
+  let connection = await PlacesUtils.promiseDBConnection();
+  let all = [];
+  await connection.executeCached(
+    `SELECT ${columns.join(", ")} FROM moz_bookmarks;`,
+    {},
+    row => {
+      let repr = {};
+      for (let column of columns) {
+        repr[column] = row.getResultByName(column);
+      }
+      all.push(repr);
+    }
+  );
+  dump("All bookmarks:\n");
+  dump(JSON.stringify(all, undefined, 2));
+}
+
+add_task(async function test_tracking() {
+ _("Test starting and stopping the tracker");
+
+ // Remove existing tracking information for roots.
+ await startTracking();
+
+ let folder = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ title: "Test Folder",
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ });
+
+ // creating the folder should have made 2 changes - the folder itself and
+ // the parent of the folder.
+ await verifyTrackedCount(2);
+ // Reset the changes as the rest of the test doesn't want to see these.
+ await resetTracker();
+
+ function createBmk() {
+ return PlacesUtils.bookmarks.insert({
+ parentGuid: folder.guid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ }
+
+ try {
+ _("Tell the tracker to start tracking changes.");
+ await startTracking();
+ await createBmk();
+ // We expect two changed items because the containing folder
+ // changed as well (new child).
+ await verifyTrackedCount(2);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+
+ _("Notifying twice won't do any harm.");
+ await createBmk();
+ await verifyTrackedCount(3);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 2);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_tracker_sql_batching() {
+ _(
+ "Test tracker does the correct thing when it is forced to batch SQL queries"
+ );
+
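+  // SQLite's default limit on bound variables per statement is 999; insert
+  // more than twice that many items so the tracker has to batch its queries.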
+ const SQLITE_MAX_VARIABLE_NUMBER = 999;
+ let numItems = SQLITE_MAX_VARIABLE_NUMBER * 2 + 10;
+
+ await startTracking();
+
+ let children = [];
+ for (let i = 0; i < numItems; i++) {
+ children.push({
+ url: "https://example.org/" + i,
+ title: "Sync Bookmark " + i,
+ });
+ }
+ let inserted = await PlacesUtils.bookmarks.insertTree({
+ guid: PlacesUtils.bookmarks.unfiledGuid,
+ children: [
+ {
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ children,
+ },
+ ],
+ });
+
+ Assert.equal(children.length, numItems);
+ Assert.equal(inserted.length, numItems + 1);
+ await verifyTrackedCount(numItems + 2); // The parent and grandparent are also tracked.
+ await resetTracker();
+
+ await PlacesUtils.bookmarks.remove(inserted[0]);
+ await verifyTrackedCount(numItems + 2);
+
+ await cleanup();
+});
+
+add_task(async function test_bookmarkAdded() {
+ _("Items inserted via the synchronous bookmarks API should be tracked");
+
+ try {
+ await startTracking();
+
+ _("Insert a folder using the sync API");
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ let syncFolder = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ title: "Sync Folder",
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ });
+ await verifyTrackedItems(["menu", syncFolder.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 2);
+
+ await resetTracker();
+ await startTracking();
+
+ _("Insert a bookmark using the sync API");
+ totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ let syncBmk = await PlacesUtils.bookmarks.insert({
+ parentGuid: syncFolder.guid,
+ url: "https://example.org/sync",
+ title: "Sync Bookmark",
+ });
+ await verifyTrackedItems([syncFolder.guid, syncBmk.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 2);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_async_bookmarkAdded() {
+ _("Items inserted via the asynchronous bookmarks API should be tracked");
+
+ try {
+ await startTracking();
+
+ _("Insert a folder using the async API");
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ let asyncFolder = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ title: "Async Folder",
+ });
+ await verifyTrackedItems(["menu", asyncFolder.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 2);
+
+ await resetTracker();
+ await startTracking();
+
+ _("Insert a bookmark using the async API");
+ totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ let asyncBmk = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: asyncFolder.guid,
+ url: "https://example.org/async",
+ title: "Async Bookmark",
+ });
+ await verifyTrackedItems([asyncFolder.guid, asyncBmk.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 2);
+
+ await resetTracker();
+ await startTracking();
+
+ _("Insert a separator using the async API");
+ totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ let asyncSep = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_SEPARATOR,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ index: asyncFolder.index,
+ });
+ await verifyTrackedItems(["menu", asyncSep.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 2);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_async_onItemChanged() {
+ _("Items updated using the asynchronous bookmarks API should be tracked");
+
+ try {
+ await tracker.stop();
+
+ _("Insert a bookmark");
+ let fxBmk = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ _(`Firefox GUID: ${fxBmk.guid}`);
+
+ await startTracking();
+
+ _("Update the bookmark using the async API");
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ await PlacesUtils.bookmarks.update({
+ guid: fxBmk.guid,
+ title: "Download Firefox",
+ url: "https://www.mozilla.org/firefox",
+ // PlacesUtils.bookmarks.update rejects last modified dates older than
+ // the added date.
+ lastModified: new Date(Date.now() + DAY_IN_MS),
+ });
+
+ await verifyTrackedItems([fxBmk.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 3);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 1);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_onItemChanged_itemDates() {
+ _("Changes to item dates should be tracked");
+
+ try {
+ await tracker.stop();
+
+ _("Insert a bookmark");
+ let fx_bm = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ _(`Firefox GUID: ${fx_bm.guid}`);
+
+ await startTracking();
+
+ _("Reset the bookmark's added date, should not be tracked");
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ let dateAdded = new Date(Date.now() - DAY_IN_MS);
+ await PlacesUtils.bookmarks.update({
+ guid: fx_bm.guid,
+ dateAdded,
+ });
+ await verifyTrackedCount(0);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges);
+
+ await resetTracker();
+
+ _(
+ "Reset the bookmark's added date and another property, should be tracked"
+ );
+ totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ dateAdded = new Date();
+ await PlacesUtils.bookmarks.update({
+ guid: fx_bm.guid,
+ dateAdded,
+ title: "test",
+ });
+ await verifyTrackedItems([fx_bm.guid]);
+ Assert.equal(tracker.score, 2 * SCORE_INCREMENT_XLARGE);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 1);
+
+ await resetTracker();
+
+ _("Set the bookmark's last modified date");
+ totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ let fx_id = await PlacesTestUtils.promiseItemId(fx_bm.guid);
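+    // setItemLastModified expects PRTime (microseconds), hence the * 1000.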
+ let dateModified = Date.now() * 1000;
+ PlacesUtils.bookmarks.setItemLastModified(fx_id, dateModified);
+ await verifyTrackedItems([fx_bm.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 1);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_onItemTagged() {
+ _("Items tagged using the synchronous API should be tracked");
+
+ try {
+ await tracker.stop();
+
+ _("Create a folder");
+ let folder = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ title: "Parent",
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ });
+ _("Folder ID: " + folder);
+ _("Folder GUID: " + folder.guid);
+
+ _("Track changes to tags");
+ let uri = CommonUtils.makeURI("http://getfirefox.com");
+ let b = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder.guid,
+ url: uri,
+ title: "Get Firefox!",
+ });
+ _("New item is " + b);
+ _("GUID: " + b.guid);
+
+ await startTracking();
+
+ _("Tag the item");
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ PlacesUtils.tagging.tagURI(uri, ["foo"]);
+
+ // bookmark should be tracked, folder should not be.
+ await verifyTrackedItems([b.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 3);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 6);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_onItemUntagged() {
+ _("Items untagged using the synchronous API should be tracked");
+
+ try {
+ await tracker.stop();
+
+ _("Insert tagged bookmarks");
+ let uri = CommonUtils.makeURI("http://getfirefox.com");
+ let fx1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: uri,
+ title: "Get Firefox!",
+ });
+ // Different parent and title; same URL.
+ let fx2 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ url: uri,
+ title: "Download Firefox",
+ });
+ PlacesUtils.tagging.tagURI(uri, ["foo"]);
+
+ await startTracking();
+
+ _("Remove the tag");
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ PlacesUtils.tagging.untagURI(uri, ["foo"]);
+
+ await verifyTrackedItems([fx1.guid, fx2.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 4);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 5);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_async_onItemUntagged() {
+ _("Items untagged using the asynchronous API should be tracked");
+
+ try {
+ await tracker.stop();
+
+ _("Insert tagged bookmarks");
+ let fxBmk1 = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ let fxBmk2 = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ url: "http://getfirefox.com",
+ title: "Download Firefox",
+ });
+ let tag = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ parentGuid: PlacesUtils.bookmarks.tagsGuid,
+ title: "some tag",
+ });
+ let fxTag = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: tag.guid,
+ url: "http://getfirefox.com",
+ });
+
+ await startTracking();
+
+ _("Remove the tag using the async bookmarks API");
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ await PlacesUtils.bookmarks.remove(fxTag.guid);
+
+ await verifyTrackedItems([fxBmk1.guid, fxBmk2.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 4);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 5);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_async_onItemTagged() {
+ _("Items tagged using the asynchronous API should be tracked");
+
+ try {
+ await tracker.stop();
+
+ _("Insert untagged bookmarks");
+ let folder1 = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ title: "Folder 1",
+ });
+ let fxBmk1 = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: folder1.guid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ let folder2 = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ title: "Folder 2",
+ });
+ // Different parent and title; same URL.
+ let fxBmk2 = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: folder2.guid,
+ url: "http://getfirefox.com",
+ title: "Download Firefox",
+ });
+
+ await startTracking();
+
+ // This will change once tags are moved into a separate table (bug 424160).
+ // We specifically test this case because Bookmarks.jsm updates tagged
+ // bookmarks and notifies observers.
+ _("Insert a tag using the async bookmarks API");
+ let tag = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ parentGuid: PlacesUtils.bookmarks.tagsGuid,
+ title: "some tag",
+ });
+
+ _("Tag an item using the async bookmarks API");
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: tag.guid,
+ url: "http://getfirefox.com",
+ });
+
+ await verifyTrackedItems([fxBmk1.guid, fxBmk2.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 4);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 5);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_async_onItemKeywordChanged() {
+ _("Keyword changes via the asynchronous API should be tracked");
+
+ try {
+ await tracker.stop();
+
+ _("Insert two bookmarks with the same URL");
+ let fxBmk1 = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ let fxBmk2 = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ url: "http://getfirefox.com",
+ title: "Download Firefox",
+ });
+
+ await startTracking();
+
+ _("Add a keyword for both items");
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ await PlacesUtils.keywords.insert({
+ keyword: "the_keyword",
+ url: "http://getfirefox.com",
+ postData: "postData",
+ });
+
+ await verifyTrackedItems([fxBmk1.guid, fxBmk2.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 2);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 2);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_async_onItemKeywordDeleted() {
+ _("Keyword deletions via the asynchronous API should be tracked");
+
+ try {
+ await tracker.stop();
+
+ _("Insert two bookmarks with the same URL and keywords");
+ let fxBmk1 = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ let fxBmk2 = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ url: "http://getfirefox.com",
+ title: "Download Firefox",
+ });
+ await PlacesUtils.keywords.insert({
+ keyword: "the_keyword",
+ url: "http://getfirefox.com",
+ });
+
+ await startTracking();
+
+ _("Remove the keyword");
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ await PlacesUtils.keywords.remove("the_keyword");
+
+ await verifyTrackedItems([fxBmk1.guid, fxBmk2.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 2);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 2);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_bookmarkAdded_filtered_root() {
+ _("Items outside the change roots should not be tracked");
+
+ try {
+ await startTracking();
+
+ _("Create a new root");
+ let root = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.rootGuid,
+ title: "New root",
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ });
+ _(`New root GUID: ${root.guid}`);
+
+ _("Insert a bookmark underneath the new root");
+ let untrackedBmk = await PlacesUtils.bookmarks.insert({
+ parentGuid: root.guid,
+ url: "http://getthunderbird.com",
+ title: "Get Thunderbird!",
+ });
+ _(`New untracked bookmark GUID: ${untrackedBmk.guid}`);
+
+ _("Insert a bookmark underneath the Places root");
+ let rootBmk = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.rootGuid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ _(`New Places root bookmark GUID: ${rootBmk.guid}`);
+
+ _("New root and bookmark should be ignored");
+ await verifyTrackedItems([]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 3);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_onItemDeleted_filtered_root() {
+ _("Deleted items outside the change roots should not be tracked");
+
+ try {
+ await tracker.stop();
+
+ _("Insert a bookmark underneath the Places root");
+ let rootBmk = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.rootGuid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ _(`New Places root bookmark GUID: ${rootBmk.guid}`);
+
+ await startTracking();
+
+ await PlacesUtils.bookmarks.remove(rootBmk);
+
+ await verifyTrackedItems([]);
+ // We'll still increment the counter for the removed item.
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_onPageAnnoChanged() {
+ _("Page annotations should not be tracked");
+
+ try {
+ await tracker.stop();
+
+ _("Insert a bookmark without an annotation");
+ let pageURI = "http://getfirefox.com";
+ await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: pageURI,
+ title: "Get Firefox!",
+ });
+
+ await startTracking();
+
+ _("Add a page annotation");
+ await PlacesUtils.history.update({
+ url: pageURI,
+ annotations: new Map([[PlacesUtils.CHARSET_ANNO, "UTF-16"]]),
+ });
+ await verifyTrackedItems([]);
+ Assert.equal(tracker.score, 0);
+ await resetTracker();
+
+ _("Remove the page annotation");
+ await PlacesUtils.history.update({
+ url: pageURI,
+ annotations: new Map([[PlacesUtils.CHARSET_ANNO, null]]),
+ });
+ await verifyTrackedItems([]);
+ Assert.equal(tracker.score, 0);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_onFaviconChanged() {
+ _("Favicon changes should not be tracked");
+
+ try {
+ await tracker.stop();
+
+ let pageURI = CommonUtils.makeURI("http://getfirefox.com");
+ let iconURI = CommonUtils.makeURI("http://getfirefox.com/icon");
+ await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: pageURI,
+ title: "Get Firefox!",
+ });
+
+ await PlacesTestUtils.addVisits(pageURI);
+
+ await startTracking();
+
+ _("Favicon annotations should be ignored");
+ let iconURL =
+ "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAA" +
+ "AAAA6fptVAAAACklEQVQI12NgAAAAAgAB4iG8MwAAAABJRU5ErkJggg==";
+
+ PlacesUtils.favicons.replaceFaviconDataFromDataURL(
+ iconURI,
+ iconURL,
+ 0,
+ Services.scriptSecurityManager.getSystemPrincipal()
+ );
+
+ await new Promise(resolve => {
+ PlacesUtils.favicons.setAndFetchFaviconForPage(
+ pageURI,
+ iconURI,
+ true,
+ PlacesUtils.favicons.FAVICON_LOAD_NON_PRIVATE,
+ (uri, dataLen, data, mimeType) => {
+ resolve();
+ },
+ Services.scriptSecurityManager.getSystemPrincipal()
+ );
+ });
+ await verifyTrackedItems([]);
+ Assert.equal(tracker.score, 0);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_async_onItemMoved_moveToFolder() {
+ _("Items moved via `moveToFolder` should be tracked");
+
+ try {
+ await tracker.stop();
+
+ await PlacesUtils.bookmarks.insertTree({
+ guid: PlacesUtils.bookmarks.menuGuid,
+ children: [
+ {
+ guid: "bookmarkAAAA",
+ title: "A",
+ url: "http://example.com/a",
+ },
+ {
+ guid: "bookmarkBBBB",
+ title: "B",
+ url: "http://example.com/b",
+ },
+ {
+ guid: "bookmarkCCCC",
+ title: "C",
+ url: "http://example.com/c",
+ },
+ {
+ guid: "bookmarkDDDD",
+ title: "D",
+ url: "http://example.com/d",
+ },
+ ],
+ });
+ await PlacesUtils.bookmarks.insertTree({
+ guid: PlacesUtils.bookmarks.toolbarGuid,
+ children: [
+ {
+ guid: "bookmarkEEEE",
+ title: "E",
+ url: "http://example.com/e",
+ },
+ ],
+ });
+
+ await startTracking();
+
+ _("Move (A B D) to the toolbar");
+ await PlacesUtils.bookmarks.moveToFolder(
+ ["bookmarkAAAA", "bookmarkBBBB", "bookmarkDDDD"],
+ PlacesUtils.bookmarks.toolbarGuid,
+ PlacesUtils.bookmarks.DEFAULT_INDEX
+ );
+
+ // Moving multiple bookmarks between two folders should track the old
+ // folder, new folder, and moved bookmarks.
+ await verifyTrackedItems([
+ "menu",
+ "toolbar",
+ "bookmarkAAAA",
+ "bookmarkBBBB",
+ "bookmarkDDDD",
+ ]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 3);
+ await resetTracker();
+
+ _("Reorder toolbar children: (D A B E)");
+ await PlacesUtils.bookmarks.moveToFolder(
+ ["bookmarkDDDD", "bookmarkAAAA", "bookmarkBBBB"],
+ PlacesUtils.bookmarks.toolbarGuid,
+ 0
+ );
+
+ // Reordering bookmarks in a folder should only track the folder, not the
+ // bookmarks.
+ await verifyTrackedItems(["toolbar"]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 2);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_async_onItemMoved_update() {
+ _("Items moved via the asynchronous API should be tracked");
+
+ try {
+ await tracker.stop();
+
+ await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ let tbBmk = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "http://getthunderbird.com",
+ title: "Get Thunderbird!",
+ });
+
+ await startTracking();
+
+ _("Repositioning a bookmark should track the folder");
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ await PlacesUtils.bookmarks.update({
+ guid: tbBmk.guid,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ index: 0,
+ });
+ await verifyTrackedItems(["menu"]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 1);
+ await resetTracker();
+
+ _("Reparenting a bookmark should track both folders and the bookmark");
+ totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ await PlacesUtils.bookmarks.update({
+ guid: tbBmk.guid,
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ index: PlacesUtils.bookmarks.DEFAULT_INDEX,
+ });
+ await verifyTrackedItems(["menu", "toolbar", tbBmk.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 3);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_async_onItemMoved_reorder() {
+ _("Items reordered via the asynchronous API should be tracked");
+
+ try {
+ await tracker.stop();
+
+ _("Insert out-of-order bookmarks");
+ let fxBmk = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ _(`Firefox GUID: ${fxBmk.guid}`);
+
+ let tbBmk = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "http://getthunderbird.com",
+ title: "Get Thunderbird!",
+ });
+ _(`Thunderbird GUID: ${tbBmk.guid}`);
+
+ let mozBmk = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "https://mozilla.org",
+ title: "Mozilla",
+ });
+ _(`Mozilla GUID: ${mozBmk.guid}`);
+
+ await startTracking();
+
+ _("Reorder bookmarks");
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ await PlacesUtils.bookmarks.reorder(PlacesUtils.bookmarks.menuGuid, [
+ mozBmk.guid,
+ fxBmk.guid,
+ tbBmk.guid,
+ ]);
+
+ // We only track the folder if we reorder its children, but we should
+ // bump the score for every changed item.
+ await verifyTrackedItems(["menu"]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 3);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 1);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_onItemDeleted_removeFolderTransaction() {
+ _("Folders removed in a transaction should be tracked");
+
+ try {
+ await tracker.stop();
+
+ _("Create a folder with two children");
+ let folder = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ title: "Test folder",
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ });
+ _(`Folder GUID: ${folder.guid}`);
+ let fx = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder.guid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ _(`Firefox GUID: ${fx.guid}`);
+ let tb = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder.guid,
+ url: "http://getthunderbird.com",
+ title: "Get Thunderbird!",
+ });
+ _(`Thunderbird GUID: ${tb.guid}`);
+
+ await startTracking();
+
+ let txn = PlacesTransactions.Remove({ guid: folder.guid });
+ // We haven't executed the transaction yet.
+ await verifyTrackerEmpty();
+
+ _("Execute the remove folder transaction");
+ await txn.transact();
+ await verifyTrackedItems(["menu", folder.guid, fx.guid, tb.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 3);
+ await resetTracker();
+
+ _("Undo the remove folder transaction");
+ await PlacesTransactions.undo();
+
+ await verifyTrackedItems(["menu", folder.guid, fx.guid, tb.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 3);
+ await resetTracker();
+
+ _("Redo the transaction");
+ await PlacesTransactions.redo();
+ await verifyTrackedItems(["menu", folder.guid, fx.guid, tb.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 3);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_treeMoved() {
+ _("Moving an entire tree of bookmarks should track the parents");
+
+ try {
+ // Create a couple of parent folders.
+ let folder1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ test: "First test folder",
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ });
+
+ // A second folder in the first.
+ let folder2 = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder1.guid,
+ title: "Second test folder",
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ });
+
+ // Create a couple of bookmarks in the second folder.
+ await PlacesUtils.bookmarks.insert({
+ parentGuid: folder2.guid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ await PlacesUtils.bookmarks.insert({
+ parentGuid: folder2.guid,
+ url: "http://getthunderbird.com",
+ title: "Get Thunderbird!",
+ });
+
+ await startTracking();
+
+ // Move folder 2 to be a sibling of folder1.
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ await PlacesUtils.bookmarks.update({
+ guid: folder2.guid,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ index: 0,
+ });
+
+ // the menu and both folders should be tracked, the children should not be.
+ await verifyTrackedItems(["menu", folder1.guid, folder2.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 3);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_onItemDeleted() {
+ _("Bookmarks deleted via the synchronous API should be tracked");
+
+ try {
+ await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ let tb = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "http://getthunderbird.com",
+ title: "Get Thunderbird!",
+ });
+
+ await startTracking();
+
+ // Delete the last item - the item and parent should be tracked.
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ await PlacesUtils.bookmarks.remove(tb);
+
+ await verifyTrackedItems(["menu", tb.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 2);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_async_onItemDeleted() {
+ _("Bookmarks deleted via the asynchronous API should be tracked");
+
+ try {
+ await tracker.stop();
+
+ let fxBmk = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "http://getthunderbird.com",
+ title: "Get Thunderbird!",
+ });
+
+ await startTracking();
+
+ _("Delete the first item");
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ await PlacesUtils.bookmarks.remove(fxBmk.guid);
+
+ await verifyTrackedItems(["menu", fxBmk.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 2);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_async_onItemDeleted_eraseEverything() {
+ _("Erasing everything should track all deleted items");
+
+ try {
+ await tracker.stop();
+
+ let fxBmk = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.mobileGuid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ _(`Firefox GUID: ${fxBmk.guid}`);
+ let tbBmk = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.mobileGuid,
+ url: "http://getthunderbird.com",
+ title: "Get Thunderbird!",
+ });
+ _(`Thunderbird GUID: ${tbBmk.guid}`);
+ let mozBmk = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "https://mozilla.org",
+ title: "Mozilla",
+ });
+ _(`Mozilla GUID: ${mozBmk.guid}`);
+ let mdnBmk = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "https://developer.mozilla.org",
+ title: "MDN",
+ });
+ _(`MDN GUID: ${mdnBmk.guid}`);
+ let bugsFolder = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ title: "Bugs",
+ });
+ _(`Bugs folder GUID: ${bugsFolder.guid}`);
+ let bzBmk = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: bugsFolder.guid,
+ url: "https://bugzilla.mozilla.org",
+ title: "Bugzilla",
+ });
+ _(`Bugzilla GUID: ${bzBmk.guid}`);
+ let bugsChildFolder = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ parentGuid: bugsFolder.guid,
+ title: "Bugs child",
+ });
+ _(`Bugs child GUID: ${bugsChildFolder.guid}`);
+ let bugsGrandChildBmk = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
+ parentGuid: bugsChildFolder.guid,
+ url: "https://example.com",
+ title: "Bugs grandchild",
+ });
+ _(`Bugs grandchild GUID: ${bugsGrandChildBmk.guid}`);
+
+ await startTracking();
+ // Simulate moving a synced item into a new folder. Deleting the folder
+ // should write a tombstone for the item, but not the folder.
+ await PlacesTestUtils.setBookmarkSyncFields({
+ guid: bugsChildFolder.guid,
+ syncStatus: PlacesUtils.bookmarks.SYNC_STATUS.NEW,
+ });
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ await PlacesUtils.bookmarks.eraseEverything();
+
+ // bugsChildFolder's sync status is still "NEW", so it shouldn't be
+ // tracked. bugsGrandChildBmk is "NORMAL", so we *should* write a
+ // tombstone and track it.
+ await verifyTrackedItems([
+ "menu",
+ mozBmk.guid,
+ mdnBmk.guid,
+ "toolbar",
+ bugsFolder.guid,
+ "mobile",
+ fxBmk.guid,
+ tbBmk.guid,
+ "unfiled",
+ bzBmk.guid,
+ bugsGrandChildBmk.guid,
+ ]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 8);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 11);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
+
+add_task(async function test_onItemDeleted_tree() {
+ _("Deleting a tree of bookmarks should track all items");
+
+ try {
+ // Create a couple of parent folders.
+ let folder1 = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ title: "First test folder",
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ });
+
+ // A second folder in the first.
+ let folder2 = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder1.guid,
+ title: "Second test folder",
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ });
+
+ // Create a couple of bookmarks in the second folder.
+ let fx = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder2.guid,
+ url: "http://getfirefox.com",
+ title: "Get Firefox!",
+ });
+ let tb = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder2.guid,
+ url: "http://getthunderbird.com",
+ title: "Get Thunderbird!",
+ });
+
+ await startTracking();
+
+ // Delete folder2 - everything we created should be tracked.
+ let totalSyncChanges = PlacesUtils.bookmarks.totalSyncChanges;
+ await PlacesUtils.bookmarks.remove(folder2);
+
+ await verifyTrackedItems([fx.guid, tb.guid, folder1.guid, folder2.guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 3);
+ Assert.equal(PlacesUtils.bookmarks.totalSyncChanges, totalSyncChanges + 4);
+ } finally {
+ _("Clean up.");
+ await cleanup();
+ }
+});
diff --git a/services/sync/tests/unit/test_bridged_engine.js b/services/sync/tests/unit/test_bridged_engine.js
new file mode 100644
index 0000000000..25a81f8f69
--- /dev/null
+++ b/services/sync/tests/unit/test_bridged_engine.js
@@ -0,0 +1,248 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { BridgedEngine, BridgeWrapperXPCOM } = ChromeUtils.importESModule(
+ "resource://services-sync/bridged_engine.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+// Wraps an `object` in a proxy so that its methods are bound to it. This
+// simulates how XPCOM class instances have all their methods bound.
+function withBoundMethods(object) {
+ return new Proxy(object, {
+ get(target, key) {
+ let value = target[key];
+ return typeof value == "function" ? value.bind(target) : value;
+ },
+ });
+}
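+// Used below as new BridgeWrapperXPCOM(withBoundMethods(bridge)), so the plain
+// test bridge behaves like a real XPCOM instance with bound methods.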
+
+add_task(async function test_interface() {
+ class TestBridge {
+ constructor() {
+ this.storageVersion = 2;
+ this.syncID = "syncID111111";
+ this.clear();
+ }
+
+ clear() {
+ this.lastSyncMillis = 0;
+ this.wasSyncStarted = false;
+ this.incomingEnvelopes = [];
+ this.uploadedIDs = [];
+ this.wasSyncFinished = false;
+ this.wasReset = false;
+ this.wasWiped = false;
+ }
+
+ // `mozIBridgedSyncEngine` methods.
+
+ getLastSync(callback) {
+ CommonUtils.nextTick(() => callback.handleSuccess(this.lastSyncMillis));
+ }
+
+ setLastSync(millis, callback) {
+ this.lastSyncMillis = millis;
+ CommonUtils.nextTick(() => callback.handleSuccess());
+ }
+
+ resetSyncId(callback) {
+ CommonUtils.nextTick(() => callback.handleSuccess(this.syncID));
+ }
+
+ ensureCurrentSyncId(newSyncId, callback) {
+ equal(newSyncId, this.syncID, "Local and new sync IDs should match");
+ CommonUtils.nextTick(() => callback.handleSuccess(this.syncID));
+ }
+
+ syncStarted(callback) {
+ this.wasSyncStarted = true;
+ CommonUtils.nextTick(() => callback.handleSuccess());
+ }
+
+ storeIncoming(envelopes, callback) {
+ this.incomingEnvelopes.push(...envelopes.map(r => JSON.parse(r)));
+ CommonUtils.nextTick(() => callback.handleSuccess());
+ }
+
+ apply(callback) {
+ let outgoingEnvelopes = [
+ {
+ id: "hanson",
+ data: {
+ plants: ["seed", "flower 💐", "rose"],
+ canYouTell: false,
+ },
+ },
+ {
+ id: "sheryl-crow",
+ data: {
+ today: "winding 🛣",
+ tomorrow: "winding 🛣",
+ },
+ },
+ ].map(cleartext =>
+ JSON.stringify({
+ id: cleartext.id,
+ payload: JSON.stringify(cleartext),
+ })
+ );
+ CommonUtils.nextTick(() => callback.handleSuccess(outgoingEnvelopes));
+ }
+
+ setUploaded(millis, ids, callback) {
+ this.uploadedIDs.push(...ids);
+ CommonUtils.nextTick(() => callback.handleSuccess());
+ }
+
+ syncFinished(callback) {
+ this.wasSyncFinished = true;
+ CommonUtils.nextTick(() => callback.handleSuccess());
+ }
+
+ reset(callback) {
+ this.clear();
+ this.wasReset = true;
+ CommonUtils.nextTick(() => callback.handleSuccess());
+ }
+
+ wipe(callback) {
+ this.clear();
+ this.wasWiped = true;
+ CommonUtils.nextTick(() => callback.handleSuccess());
+ }
+ }
+
+ let bridge = new TestBridge();
+ let engine = new BridgedEngine("Nineties", Service);
+ engine._bridge = new BridgeWrapperXPCOM(withBoundMethods(bridge));
+ engine.enabled = true;
+
+ let server = await serverForFoo(engine);
+ try {
+ await SyncTestingInfrastructure(server);
+
+ info("Add server records");
+ let foo = server.user("foo");
+ let collection = foo.collection("nineties");
+ let now = new_timestamp();
+ collection.insert(
+ "backstreet",
+ encryptPayload({
+ id: "backstreet",
+ data: {
+ say: "I want it that way",
+ when: "never",
+ },
+ }),
+ now
+ );
+ collection.insert(
+ "tlc",
+ encryptPayload({
+ id: "tlc",
+ data: {
+ forbidden: ["scrubs 🚫"],
+ numberAvailable: false,
+ },
+ }),
+ now + 5
+ );
+
+ info("Sync the engine");
+ // Advance the last sync time to skip the Backstreet Boys...
+ bridge.lastSyncMillis = 1000 * (now + 2);
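+ // (Server timestamps are in seconds; lastSyncMillis is in milliseconds, hence the * 1000.)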
+ await sync_engine_and_validate_telem(engine, false);
+
+ let metaGlobal = foo.collection("meta").wbo("global").get();
+ deepEqual(
+ JSON.parse(metaGlobal.payload).engines.nineties,
+ {
+ version: 2,
+ syncID: "syncID111111",
+ },
+ "Should write storage version and sync ID to m/g"
+ );
+
+ greater(bridge.lastSyncMillis, 0, "Should update last sync time");
+ ok(
+ bridge.wasSyncStarted,
+ "Should have started sync before storing incoming"
+ );
+ deepEqual(
+ bridge.incomingEnvelopes
+ .sort((a, b) => a.id.localeCompare(b.id))
+ .map(({ payload, ...envelope }) => ({
+ cleartextAsObject: JSON.parse(payload),
+ ...envelope,
+ })),
+ [
+ {
+ id: "tlc",
+ modified: now + 5,
+ cleartextAsObject: {
+ id: "tlc",
+ data: {
+ forbidden: ["scrubs 🚫"],
+ numberAvailable: false,
+ },
+ },
+ },
+ ],
+ "Should stage incoming records from server"
+ );
+ deepEqual(
+ bridge.uploadedIDs.sort(),
+ ["hanson", "sheryl-crow"],
+ "Should mark new local records as uploaded"
+ );
+ ok(bridge.wasSyncFinished, "Should have finished sync after uploading");
+
+ deepEqual(
+ collection.keys().sort(),
+ ["backstreet", "hanson", "sheryl-crow", "tlc"],
+ "Should have all records on server"
+ );
+ let expectedRecords = [
+ {
+ id: "sheryl-crow",
+ data: {
+ today: "winding 🛣",
+ tomorrow: "winding 🛣",
+ },
+ },
+ {
+ id: "hanson",
+ data: {
+ plants: ["seed", "flower 💐", "rose"],
+ canYouTell: false,
+ },
+ },
+ ];
+ for (let expected of expectedRecords) {
+ let actual = collection.cleartext(expected.id);
+ deepEqual(
+ actual,
+ expected,
+ `Should upload record ${expected.id} from bridged engine`
+ );
+ }
+
+ await engine.resetClient();
+ ok(bridge.wasReset, "Should reset local storage for bridge");
+
+ await engine.wipeClient();
+ ok(bridge.wasWiped, "Should wipe local storage for bridge");
+
+ await engine.resetSyncID();
+ ok(
+ !foo.collection("nineties"),
+ "Should delete server collection after resetting sync ID"
+ );
+ } finally {
+ await promiseStopServer(server);
+ await engine.finalize();
+ }
+});
diff --git a/services/sync/tests/unit/test_clients_engine.js b/services/sync/tests/unit/test_clients_engine.js
new file mode 100644
index 0000000000..d910a67503
--- /dev/null
+++ b/services/sync/tests/unit/test_clients_engine.js
@@ -0,0 +1,2108 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { ClientEngine, ClientsRec } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/clients.sys.mjs"
+);
+const { CryptoWrapper } = ChromeUtils.importESModule(
+ "resource://services-sync/record.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+const MORE_THAN_CLIENTS_TTL_REFRESH = 691200; // 8 days
+const LESS_THAN_CLIENTS_TTL_REFRESH = 86400; // 1 day
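+// Both values straddle CLIENTS_TTL_REFRESH, the engine's one-week (604800s)
+// record re-upload threshold.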
+
+let engine;
+
+/**
+ * Unpack the record with this ID, and verify that it has the same version that
+ * we should be putting into records.
+ */
+async function check_record_version(user, id) {
+ let payload = user.collection("clients").wbo(id).data;
+
+ let rec = new CryptoWrapper();
+ rec.id = id;
+ rec.collection = "clients";
+ rec.ciphertext = payload.ciphertext;
+ rec.hmac = payload.hmac;
+ rec.IV = payload.IV;
+
+ let cleartext = await rec.decrypt(
+ Service.collectionKeys.keyForCollection("clients")
+ );
+
+ _("Payload is " + JSON.stringify(cleartext));
+ equal(Services.appinfo.version, cleartext.version);
+ equal(1, cleartext.protocols.length);
+ equal("1.5", cleartext.protocols[0]);
+}
+
+// Compare two command arrays. Each actual command must carry a flowID that is
+// unique within the array; "expected" omits the flowIDs because their values
+// aren't known in advance.
+function compareCommands(actual, expected, description) {
+ let tweakedActual = JSON.parse(JSON.stringify(actual));
+ tweakedActual.forEach(elt => delete elt.flowID);
+ deepEqual(tweakedActual, expected, description);
+ // each item must have a unique flowID.
+ let allIDs = new Set(actual.map(elt => elt.flowID).filter(fid => !!fid));
+ equal(allIDs.size, actual.length, "all items have unique IDs");
+}
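+// Example: compareCommands(payload.commands, [{ command: "logout", args: [] }], "description");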
+
+async function syncClientsEngine(server) {
+ engine._lastFxADevicesFetch = 0;
+ engine.lastModified = server.getCollection("foo", "clients").timestamp;
+ await engine._sync();
+}
+
+add_task(async function setup() {
+ engine = Service.clientsEngine;
+});
+
+async function cleanup() {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ await engine._tracker.clearChangedIDs();
+ await engine._resetClient();
+ // Restore the test log levels (clearing the pref branch above resets them),
+ // since not all the tests use SyncTestingInfrastructure, and it's cheap.
+ syncTestLogging();
+ // We don't finalize storage at cleanup, since we use the same clients engine
+ // instance across all tests.
+}
+
+add_task(async function test_bad_hmac() {
+ _("Ensure that Clients engine deletes corrupt records.");
+ let deletedCollections = [];
+ let deletedItems = [];
+ let callback = {
+ onItemDeleted(username, coll, wboID) {
+ deletedItems.push(coll + "/" + wboID);
+ },
+ onCollectionDeleted(username, coll) {
+ deletedCollections.push(coll);
+ },
+ };
+ Object.setPrototypeOf(callback, SyncServerCallback);
+ let server = await serverForFoo(engine, callback);
+ let user = server.user("foo");
+
+ function check_clients_count(expectedCount) {
+ let coll = user.collection("clients");
+
+ // Treat a non-existent collection as empty.
+ equal(expectedCount, coll ? coll.count() : 0);
+ }
+
+ function check_client_deleted(id) {
+ let coll = user.collection("clients");
+ let wbo = coll.wbo(id);
+ return !wbo || !wbo.payload;
+ }
+
+ async function uploadNewKeys() {
+ await generateNewKeys(Service.collectionKeys);
+ let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
+ await serverKeys.encrypt(Service.identity.syncKeyBundle);
+ ok(
+ (await serverKeys.upload(Service.resource(Service.cryptoKeysURL))).success
+ );
+ }
+
+ try {
+ await configureIdentity({ username: "foo" }, server);
+ await Service.login();
+
+ await generateNewKeys(Service.collectionKeys);
+
+ _("First sync, client record is uploaded");
+ equal(engine.lastRecordUpload, 0);
+ ok(engine.isFirstSync);
+ check_clients_count(0);
+ await syncClientsEngine(server);
+ check_clients_count(1);
+ ok(engine.lastRecordUpload > 0);
+ ok(!engine.isFirstSync);
+
+ // Our uploaded record has a version.
+ await check_record_version(user, engine.localID);
+
+ // Initial setup can wipe the server, so clean up.
+ deletedCollections = [];
+ deletedItems = [];
+
+ _("Change our keys and our client ID, reupload keys.");
+ let oldLocalID = engine.localID; // Preserve to test for deletion!
+ engine.localID = Utils.makeGUID();
+ await engine.resetClient();
+ await generateNewKeys(Service.collectionKeys);
+ let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
+ await serverKeys.encrypt(Service.identity.syncKeyBundle);
+ ok(
+ (await serverKeys.upload(Service.resource(Service.cryptoKeysURL))).success
+ );
+
+ _("Sync.");
+ await syncClientsEngine(server);
+
+ _("Old record " + oldLocalID + " was deleted, new one uploaded.");
+ check_clients_count(1);
+ check_client_deleted(oldLocalID);
+
+ _(
+ "Now change our keys but don't upload them. " +
+ "That means we get an HMAC error but redownload keys."
+ );
+ Service.lastHMACEvent = 0;
+ engine.localID = Utils.makeGUID();
+ await engine.resetClient();
+ await generateNewKeys(Service.collectionKeys);
+ deletedCollections = [];
+ deletedItems = [];
+ check_clients_count(1);
+ await syncClientsEngine(server);
+
+ _("Old record was not deleted, new one uploaded.");
+ equal(deletedCollections.length, 0);
+ equal(deletedItems.length, 0);
+ check_clients_count(2);
+
+ _(
+ "Now try the scenario where our keys are wrong *and* there's a bad record."
+ );
+ // Clean up and start fresh.
+ user.collection("clients")._wbos = {};
+ Service.lastHMACEvent = 0;
+ engine.localID = Utils.makeGUID();
+ await engine.resetClient();
+ deletedCollections = [];
+ deletedItems = [];
+ check_clients_count(0);
+
+ await uploadNewKeys();
+
+ // Sync once to upload a record.
+ await syncClientsEngine(server);
+ check_clients_count(1);
+
+ // Generate and upload new keys, so the old client record is wrong.
+ await uploadNewKeys();
+
+ // Create a new client record and new keys. Now our keys are wrong, as well
+ // as the object on the server. We'll download the new keys and also delete
+ // the bad client record.
+ oldLocalID = engine.localID; // Preserve to test for deletion!
+ engine.localID = Utils.makeGUID();
+ await engine.resetClient();
+ await generateNewKeys(Service.collectionKeys);
+ let oldKey = Service.collectionKeys.keyForCollection();
+
+ equal(deletedCollections.length, 0);
+ equal(deletedItems.length, 0);
+ await syncClientsEngine(server);
+ equal(deletedItems.length, 1);
+ check_client_deleted(oldLocalID);
+ check_clients_count(1);
+ let newKey = Service.collectionKeys.keyForCollection();
+ ok(!oldKey.equals(newKey));
+ } finally {
+ await cleanup();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_properties() {
+ _("Test lastRecordUpload property");
+ try {
+ equal(
+ Svc.PrefBranch.getPrefType("clients.lastRecordUpload"),
+ Ci.nsIPrefBranch.PREF_INVALID
+ );
+ equal(engine.lastRecordUpload, 0);
+
+ let now = Date.now();
+ engine.lastRecordUpload = now / 1000;
+ equal(engine.lastRecordUpload, Math.floor(now / 1000));
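+ // lastRecordUpload is persisted as whole seconds, so fractional values are floored.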
+ } finally {
+ await cleanup();
+ }
+});
+
+add_task(async function test_full_sync() {
+ _("Ensure that Clients engine fetches all records for each sync.");
+
+ let now = new_timestamp();
+ let server = await serverForFoo(engine);
+ let user = server.user("foo");
+
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ let activeID = Utils.makeGUID();
+ user.collection("clients").insertRecord(
+ {
+ id: activeID,
+ name: "Active client",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+
+ let deletedID = Utils.makeGUID();
+ user.collection("clients").insertRecord(
+ {
+ id: deletedID,
+ name: "Client to delete",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+
+ try {
+ let store = engine._store;
+
+ _("First sync. 2 records downloaded; our record uploaded.");
+ strictEqual(engine.lastRecordUpload, 0);
+ ok(engine.isFirstSync);
+ await syncClientsEngine(server);
+ ok(engine.lastRecordUpload > 0);
+ ok(!engine.isFirstSync);
+ deepEqual(
+ user.collection("clients").keys().sort(),
+ [activeID, deletedID, engine.localID].sort(),
+ "Our record should be uploaded on first sync"
+ );
+ let ids = await store.getAllIDs();
+ deepEqual(
+ Object.keys(ids).sort(),
+ [activeID, deletedID, engine.localID].sort(),
+ "Other clients should be downloaded on first sync"
+ );
+
+ _("Delete a record, then sync again");
+ let collection = server.getCollection("foo", "clients");
+ collection.remove(deletedID);
+ // Simulate a timestamp update in info/collections.
+ await syncClientsEngine(server);
+
+ _("Record should be updated");
+ ids = await store.getAllIDs();
+ deepEqual(
+ Object.keys(ids).sort(),
+ [activeID, engine.localID].sort(),
+ "Deleted client should be removed on next sync"
+ );
+ } finally {
+ await cleanup();
+
+ try {
+ server.deleteCollections("foo");
+ } finally {
+ await promiseStopServer(server);
+ }
+ }
+});
+
+add_task(async function test_sync() {
+ _("Ensure that Clients engine uploads a new client record once a week.");
+
+ let server = await serverForFoo(engine);
+ let user = server.user("foo");
+
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ function clientWBO() {
+ return user.collection("clients").wbo(engine.localID);
+ }
+
+ try {
+ _("First sync. Client record is uploaded.");
+ equal(clientWBO(), undefined);
+ equal(engine.lastRecordUpload, 0);
+ ok(engine.isFirstSync);
+ await syncClientsEngine(server);
+ ok(!!clientWBO().payload);
+ ok(engine.lastRecordUpload > 0);
+ ok(!engine.isFirstSync);
+
+ _(
+ "Let's time travel more than a week back, new record should've been uploaded."
+ );
+ engine.lastRecordUpload -= MORE_THAN_CLIENTS_TTL_REFRESH;
+ let lastweek = engine.lastRecordUpload;
+ clientWBO().payload = undefined;
+ await syncClientsEngine(server);
+ ok(!!clientWBO().payload);
+ ok(engine.lastRecordUpload > lastweek);
+ ok(!engine.isFirstSync);
+
+ _("Remove client record.");
+ await engine.removeClientData();
+ equal(clientWBO().payload, undefined);
+
+ _("Time travel one day back, no record uploaded.");
+ engine.lastRecordUpload -= LESS_THAN_CLIENTS_TTL_REFRESH;
+ let yesterday = engine.lastRecordUpload;
+ await syncClientsEngine(server);
+ equal(clientWBO().payload, undefined);
+ equal(engine.lastRecordUpload, yesterday);
+ ok(!engine.isFirstSync);
+ } finally {
+ await cleanup();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_client_name_change() {
+ _("Ensure client name change incurs a client record update.");
+
+ let tracker = engine._tracker;
+
+ engine.localID; // Needed to increase the tracker changedIDs count.
+ let initialName = engine.localName;
+
+ tracker.start();
+ _("initial name: " + initialName);
+
+ // Tracker already has data, so clear it.
+ await tracker.clearChangedIDs();
+
+ let initialScore = tracker.score;
+
+ let changedIDs = await tracker.getChangedIDs();
+ equal(Object.keys(changedIDs).length, 0);
+
+ Services.prefs.setStringPref(
+ "identity.fxaccounts.account.device.name",
+ "new name"
+ );
+ await tracker.asyncObserver.promiseObserversComplete();
+
+ _("new name: " + engine.localName);
+ notEqual(initialName, engine.localName);
+ changedIDs = await tracker.getChangedIDs();
+ equal(Object.keys(changedIDs).length, 1);
+ ok(engine.localID in changedIDs);
+ ok(tracker.score > initialScore);
+ ok(tracker.score >= SCORE_INCREMENT_XLARGE);
+
+ await tracker.stop();
+
+ await cleanup();
+});
+
+add_task(async function test_fxa_device_id_change() {
+ _("Ensure an FxA device ID change incurs a client record update.");
+
+ let tracker = engine._tracker;
+
+ engine.localID; // Needed to increase the tracker changedIDs count.
+
+ tracker.start();
+
+ // Tracker already has data, so clear it.
+ await tracker.clearChangedIDs();
+
+ let initialScore = tracker.score;
+
+ let changedIDs = await tracker.getChangedIDs();
+ equal(Object.keys(changedIDs).length, 0);
+
+ Services.obs.notifyObservers(null, "fxaccounts:new_device_id");
+ await tracker.asyncObserver.promiseObserversComplete();
+
+ changedIDs = await tracker.getChangedIDs();
+ equal(Object.keys(changedIDs).length, 1);
+ ok(engine.localID in changedIDs);
+ ok(tracker.score > initialScore);
+ ok(tracker.score >= SINGLE_USER_THRESHOLD);
+
+ await tracker.stop();
+
+ await cleanup();
+});
+
+add_task(async function test_last_modified() {
+ _("Ensure that remote records have a sane serverLastModified attribute.");
+
+ let now = new_timestamp();
+ let server = await serverForFoo(engine);
+ let user = server.user("foo");
+
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ let activeID = Utils.makeGUID();
+ user.collection("clients").insertRecord(
+ {
+ id: activeID,
+ name: "Active client",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+
+ try {
+ let collection = user.collection("clients");
+
+ _("Sync to download the record");
+ await syncClientsEngine(server);
+
+ equal(
+ engine._store._remoteClients[activeID].serverLastModified,
+ now - 10,
+ "last modified in the local record is correctly the server last-modified"
+ );
+
+ _("Modify the record and re-upload it");
+ // set a new name to make sure we really did upload.
+ engine._store._remoteClients[activeID].name = "New name";
+ engine._modified.set(activeID, 0);
+ // The sync above also did a POST, so adjust our lastModified.
+ engine.lastModified = server.getCollection("foo", "clients").timestamp;
+ await engine._uploadOutgoing();
+
+ _("Local record should have updated timestamp");
+ ok(engine._store._remoteClients[activeID].serverLastModified >= now);
+
+ _("Record on the server should have new name but not serverLastModified");
+ let payload = collection.cleartext(activeID);
+ equal(payload.name, "New name");
+ equal(payload.serverLastModified, undefined);
+ } finally {
+ await cleanup();
+ server.deleteCollections("foo");
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_send_command() {
+ _("Verifies _sendCommandToClient puts commands in the outbound queue.");
+
+ let store = engine._store;
+ let tracker = engine._tracker;
+ let remoteId = Utils.makeGUID();
+ let rec = new ClientsRec("clients", remoteId);
+
+ await store.create(rec);
+ await store.createRecord(remoteId, "clients");
+
+ let action = "testCommand";
+ let args = ["foo", "bar"];
+ let extra = { flowID: "flowy" };
+
+ await engine._sendCommandToClient(action, args, remoteId, extra);
+
+ let newRecord = store._remoteClients[remoteId];
+ let clientCommands = (await engine._readCommands())[remoteId];
+ notEqual(newRecord, undefined);
+ equal(clientCommands.length, 1);
+
+ let command = clientCommands[0];
+ equal(command.command, action);
+ equal(command.args.length, 2);
+ deepEqual(command.args, args);
+ ok(command.flowID);
+
+ const changes = await tracker.getChangedIDs();
+ notEqual(changes[remoteId], undefined);
+
+ await cleanup();
+});
+
+// The browser UI might call _addClientCommand indirectly without awaiting the
+// returned promise. Make sure this doesn't result in commands not being saved.
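+// If those writes raced, a later save could clobber commands queued by an
+// earlier, still-pending write.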
+add_task(async function test_add_client_command_race() {
+ let promises = [];
+ for (let i = 0; i < 100; i++) {
+ promises.push(
+ engine._addClientCommand(`client-${i}`, { command: "cmd", args: [] })
+ );
+ }
+ await Promise.all(promises);
+
+ let localCommands = await engine._readCommands();
+ for (let i = 0; i < 100; i++) {
+ equal(localCommands[`client-${i}`].length, 1);
+ }
+});
+
+add_task(async function test_command_validation() {
+ _("Verifies that command validation works properly.");
+
+ let store = engine._store;
+
+ let testCommands = [
+ ["resetAll", [], true],
+ ["resetAll", ["foo"], false],
+ ["resetEngine", ["tabs"], true],
+ ["resetEngine", [], false],
+ ["wipeEngine", ["tabs"], true],
+ ["wipeEngine", [], false],
+ ["logout", [], true],
+ ["logout", ["foo"], false],
+ ["__UNKNOWN__", [], false],
+ ];
+
+ for (let [action, args, expectedResult] of testCommands) {
+ let remoteId = Utils.makeGUID();
+ let rec = new ClientsRec("clients", remoteId);
+
+ await store.create(rec);
+ await store.createRecord(remoteId, "clients");
+
+ await engine.sendCommand(action, args, remoteId);
+
+ let newRecord = store._remoteClients[remoteId];
+ notEqual(newRecord, undefined);
+
+ let clientCommands = (await engine._readCommands())[remoteId];
+
+ if (expectedResult) {
+ _("Ensuring command is sent: " + action);
+ equal(clientCommands.length, 1);
+
+ let command = clientCommands[0];
+ equal(command.command, action);
+ deepEqual(command.args, args);
+
+ notEqual(engine._tracker, undefined);
+ const changes = await engine._tracker.getChangedIDs();
+ notEqual(changes[remoteId], undefined);
+ } else {
+ _("Ensuring command is scrubbed: " + action);
+ equal(clientCommands, undefined);
+
+ const changes = await engine._tracker.getChangedIDs();
+ equal(changes[remoteId], undefined);
+ }
+ }
+ await cleanup();
+});
+
+add_task(async function test_command_duplication() {
+ _("Ensures duplicate commands are detected and not added");
+
+ let store = engine._store;
+ let remoteId = Utils.makeGUID();
+ let rec = new ClientsRec("clients", remoteId);
+ await store.create(rec);
+ await store.createRecord(remoteId, "clients");
+
+ let action = "resetAll";
+ let args = [];
+
+ await engine.sendCommand(action, args, remoteId);
+ await engine.sendCommand(action, args, remoteId);
+
+ let clientCommands = (await engine._readCommands())[remoteId];
+ equal(clientCommands.length, 1);
+
+ _("Check variant args length");
+ await engine._saveCommands({});
+
+ action = "resetEngine";
+ await engine.sendCommand(action, [{ x: "foo" }], remoteId);
+ await engine.sendCommand(action, [{ x: "bar" }], remoteId);
+
+ _("Make sure we spot a real dupe argument.");
+ await engine.sendCommand(action, [{ x: "bar" }], remoteId);
+
+ clientCommands = (await engine._readCommands())[remoteId];
+ equal(clientCommands.length, 2);
+
+ await cleanup();
+});
+
+add_task(async function test_command_invalid_client() {
+ _("Ensures invalid client IDs are caught");
+
+ let id = Utils.makeGUID();
+ let error;
+
+ try {
+ await engine.sendCommand("wipeEngine", ["tabs"], id);
+ } catch (ex) {
+ error = ex;
+ }
+
+ equal(error.message.indexOf("Unknown remote client ID: "), 0);
+
+ await cleanup();
+});
+
+add_task(async function test_process_incoming_commands() {
+ _("Ensures local commands are executed");
+
+ engine.localCommands = [{ command: "logout", args: [] }];
+
+ let ev = "weave:service:logout:finish";
+
+ let logoutPromise = new Promise(resolve => {
+ var handler = function () {
+ Svc.Obs.remove(ev, handler);
+
+ resolve();
+ };
+
+ Svc.Obs.add(ev, handler);
+ });
+
+ // logout command causes processIncomingCommands to return explicit false.
+ ok(!(await engine.processIncomingCommands()));
+
+ await logoutPromise;
+
+ await cleanup();
+});
+
+add_task(async function test_filter_duplicate_names() {
+ _(
+ "Ensure that we exclude clients with identical names that haven't synced in a week."
+ );
+
+ let now = new_timestamp();
+ let server = await serverForFoo(engine);
+ let user = server.user("foo");
+
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ // Synced recently.
+ let recentID = Utils.makeGUID();
+ user.collection("clients").insertRecord(
+ {
+ id: recentID,
+ name: "My Phone",
+ type: "mobile",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+
+ // Dupe of our client, synced more than 1 week ago.
+ let dupeID = Utils.makeGUID();
+ user.collection("clients").insertRecord(
+ {
+ id: dupeID,
+ name: engine.localName,
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 604820
+ );
+
+ // Synced more than 1 week ago, but not a dupe.
+ let oldID = Utils.makeGUID();
+ user.collection("clients").insertRecord(
+ {
+ id: oldID,
+ name: "My old desktop",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 604820
+ );
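+ // (Both week-old records use 604820s: one week, 604800s, plus a 20-second margin.)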
+
+ try {
+ let store = engine._store;
+
+ _("First sync");
+ strictEqual(engine.lastRecordUpload, 0);
+ ok(engine.isFirstSync);
+ await syncClientsEngine(server);
+ ok(engine.lastRecordUpload > 0);
+ ok(!engine.isFirstSync);
+ deepEqual(
+ user.collection("clients").keys().sort(),
+ [recentID, dupeID, oldID, engine.localID].sort(),
+ "Our record should be uploaded on first sync"
+ );
+
+ let ids = await store.getAllIDs();
+ deepEqual(
+ Object.keys(ids).sort(),
+ [recentID, dupeID, oldID, engine.localID].sort(),
+ "Duplicate ID should remain in getAllIDs"
+ );
+ ok(
+ await engine._store.itemExists(dupeID),
+ "Dupe ID should be considered as existing for Sync methods."
+ );
+ ok(
+ !engine.remoteClientExists(dupeID),
+ "Dupe ID should not be considered as existing for external methods."
+ );
+
+ // dupe desktop should not appear in .deviceTypes.
+ equal(engine.deviceTypes.get("desktop"), 2);
+ equal(engine.deviceTypes.get("mobile"), 1);
+
+ // dupe desktop should not appear in stats
+ deepEqual(engine.stats, {
+ hasMobile: 1,
+ names: [engine.localName, "My Phone", "My old desktop"],
+ numClients: 3,
+ });
+
+ ok(engine.remoteClientExists(oldID), "non-dupe ID should exist.");
+ ok(!engine.remoteClientExists(dupeID), "dupe ID should not exist");
+ equal(
+ engine.remoteClients.length,
+ 2,
+ "dupe should not be in remoteClients"
+ );
+
+ // Check that a subsequent Sync doesn't report anything as being processed.
+ let counts;
+ Svc.Obs.add("weave:engine:sync:applied", function observe(subject, data) {
+ Svc.Obs.remove("weave:engine:sync:applied", observe);
+ counts = subject;
+ });
+
+ await syncClientsEngine(server);
+ equal(counts.applied, 0); // We didn't report applying any records.
+ equal(counts.reconciled, 4); // We reported reconciliation for all records.
+ equal(counts.succeeded, 0);
+ equal(counts.failed, 0);
+ equal(counts.newFailed, 0);
+
+ _("Broadcast logout to all clients");
+ await engine.sendCommand("logout", []);
+ await syncClientsEngine(server);
+
+ let collection = server.getCollection("foo", "clients");
+ let recentPayload = collection.cleartext(recentID);
+ compareCommands(
+ recentPayload.commands,
+ [{ command: "logout", args: [] }],
+ "Should send commands to the recent client"
+ );
+
+ let oldPayload = collection.cleartext(oldID);
+ compareCommands(
+ oldPayload.commands,
+ [{ command: "logout", args: [] }],
+ "Should send commands to the week-old client"
+ );
+
+ let dupePayload = collection.cleartext(dupeID);
+ deepEqual(
+ dupePayload.commands,
+ [],
+ "Should not send commands to the dupe client"
+ );
+
+ _("Update the dupe client's modified time");
+ collection.insertRecord(
+ {
+ id: dupeID,
+ name: engine.localName,
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+
+ _("Second sync.");
+ await syncClientsEngine(server);
+
+ ids = await store.getAllIDs();
+ deepEqual(
+ Object.keys(ids).sort(),
+ [recentID, oldID, dupeID, engine.localID].sort(),
+ "Stale client synced, so it should no longer be marked as a dupe"
+ );
+
+ ok(
+ engine.remoteClientExists(dupeID),
+ "Dupe ID should appear as it synced."
+ );
+
+ // Recently synced dupe desktop should appear in .deviceTypes.
+ equal(engine.deviceTypes.get("desktop"), 3);
+
+ // Recently synced dupe desktop should now appear in stats
+ deepEqual(engine.stats, {
+ hasMobile: 1,
+ names: [engine.localName, "My Phone", engine.localName, "My old desktop"],
+ numClients: 4,
+ });
+
+ ok(
+ engine.remoteClientExists(dupeID),
+ "recently synced dupe ID should now exist"
+ );
+ equal(
+ engine.remoteClients.length,
+ 3,
+ "recently synced dupe should now be in remoteClients"
+ );
+ } finally {
+ await cleanup();
+
+ try {
+ server.deleteCollections("foo");
+ } finally {
+ await promiseStopServer(server);
+ }
+ }
+});
+
+add_task(async function test_command_sync() {
+ _("Ensure that commands are synced across clients.");
+
+ await engine._store.wipe();
+ await generateNewKeys(Service.collectionKeys);
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let user = server.user("foo");
+ let remoteId = Utils.makeGUID();
+
+ function clientWBO(id) {
+ return user.collection("clients").wbo(id);
+ }
+
+ _("Create remote client record");
+ user.collection("clients").insertRecord({
+ id: remoteId,
+ name: "Remote client",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ });
+
+ try {
+ _("Syncing.");
+ await syncClientsEngine(server);
+
+ _("Checking remote record was downloaded.");
+ let clientRecord = engine._store._remoteClients[remoteId];
+ notEqual(clientRecord, undefined);
+ equal(clientRecord.commands.length, 0);
+
+ _("Send a command to the remote client.");
+ await engine.sendCommand("wipeEngine", ["tabs"]);
+ let clientCommands = (await engine._readCommands())[remoteId];
+ equal(clientCommands.length, 1);
+ await syncClientsEngine(server);
+
+ _("Checking record was uploaded.");
+ notEqual(clientWBO(engine.localID).payload, undefined);
+ ok(engine.lastRecordUpload > 0);
+ ok(!engine.isFirstSync);
+
+ notEqual(clientWBO(remoteId).payload, undefined);
+
+ Svc.PrefBranch.setStringPref("client.GUID", remoteId);
+ await engine._resetClient();
+ equal(engine.localID, remoteId);
+ _("Performing sync on resetted client.");
+ await syncClientsEngine(server);
+ notEqual(engine.localCommands, undefined);
+ equal(engine.localCommands.length, 1);
+
+ let command = engine.localCommands[0];
+ equal(command.command, "wipeEngine");
+ equal(command.args.length, 1);
+ equal(command.args[0], "tabs");
+ } finally {
+ await cleanup();
+
+ try {
+ let collection = server.getCollection("foo", "clients");
+ collection.remove(remoteId);
+ } finally {
+ await promiseStopServer(server);
+ }
+ }
+});
+
+add_task(async function test_clients_not_in_fxa_list() {
+ _("Ensure that clients not in the FxA devices list are marked as stale.");
+
+ await engine._store.wipe();
+ await generateNewKeys(Service.collectionKeys);
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let remoteId = Utils.makeGUID();
+ let remoteId2 = Utils.makeGUID();
+ let collection = server.getCollection("foo", "clients");
+
+ _("Create remote client records");
+ collection.insertRecord({
+ id: remoteId,
+ name: "Remote client",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ fxaDeviceId: remoteId,
+ protocols: ["1.5"],
+ });
+
+ collection.insertRecord({
+ id: remoteId2,
+ name: "Remote client 2",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ fxaDeviceId: remoteId2,
+ protocols: ["1.5"],
+ });
+
+ let fxAccounts = engine.fxAccounts;
+ engine.fxAccounts = {
+ notifyDevices() {
+ return Promise.resolve(true);
+ },
+ device: {
+ getLocalId() {
+ return fxAccounts.device.getLocalId();
+ },
+ getLocalName() {
+ return fxAccounts.device.getLocalName();
+ },
+ getLocalType() {
+ return fxAccounts.device.getLocalType();
+ },
+ recentDeviceList: [{ id: remoteId }],
+ refreshDeviceList() {
+ return Promise.resolve(true);
+ },
+ },
+ _internal: {
+ now() {
+ return Date.now();
+ },
+ },
+ };
+
+ try {
+ _("Syncing.");
+ await syncClientsEngine(server);
+
+ ok(!engine._store._remoteClients[remoteId].stale);
+ ok(engine._store._remoteClients[remoteId2].stale);
+ } finally {
+ engine.fxAccounts = fxAccounts;
+ await cleanup();
+
+ try {
+ collection.remove(remoteId);
+ } finally {
+ await promiseStopServer(server);
+ }
+ }
+});
+
+add_task(async function test_dupe_device_ids() {
+ _(
+ "Ensure that we mark devices with duplicate fxaDeviceIds but older lastModified as stale."
+ );
+
+ await engine._store.wipe();
+ await generateNewKeys(Service.collectionKeys);
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let remoteId = Utils.makeGUID();
+ let remoteId2 = Utils.makeGUID();
+ let remoteDeviceId = Utils.makeGUID();
+
+ let collection = server.getCollection("foo", "clients");
+
+ _("Create remote client records");
+ collection.insertRecord(
+ {
+ id: remoteId,
+ name: "Remote client",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ fxaDeviceId: remoteDeviceId,
+ protocols: ["1.5"],
+ },
+ new_timestamp() - 3
+ );
+ collection.insertRecord({
+ id: remoteId2,
+ name: "Remote client",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ fxaDeviceId: remoteDeviceId,
+ protocols: ["1.5"],
+ });
+
+ let fxAccounts = engine.fxAccounts;
+ engine.fxAccounts = {
+ notifyDevices() {
+ return Promise.resolve(true);
+ },
+ device: {
+ getLocalId() {
+ return fxAccounts.device.getLocalId();
+ },
+ getLocalName() {
+ return fxAccounts.device.getLocalName();
+ },
+ getLocalType() {
+ return fxAccounts.device.getLocalType();
+ },
+ recentDeviceList: [{ id: remoteDeviceId }],
+ refreshDeviceList() {
+ return Promise.resolve(true);
+ },
+ },
+ _internal: {
+ now() {
+ return Date.now();
+ },
+ },
+ };
+
+ try {
+ _("Syncing.");
+ await syncClientsEngine(server);
+
+ ok(engine._store._remoteClients[remoteId].stale);
+ ok(!engine._store._remoteClients[remoteId2].stale);
+ } finally {
+ engine.fxAccounts = fxAccounts;
+ await cleanup();
+
+ try {
+ collection.remove(remoteId);
+ } finally {
+ await promiseStopServer(server);
+ }
+ }
+});
+
+add_task(async function test_refresh_fxa_device_list() {
+ _("Ensure we refresh the fxa device list when we expect to.");
+
+ await engine._store.wipe();
+ engine._lastFxaDeviceRefresh = 0;
+ await generateNewKeys(Service.collectionKeys);
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let numRefreshes = 0;
+ let now = Date.now();
+ let fxAccounts = engine.fxAccounts;
+ engine.fxAccounts = {
+ notifyDevices() {
+ return Promise.resolve(true);
+ },
+ device: {
+ getLocalId() {
+ return fxAccounts.device.getLocalId();
+ },
+ getLocalName() {
+ return fxAccounts.device.getLocalName();
+ },
+ getLocalType() {
+ return fxAccounts.device.getLocalType();
+ },
+ recentDeviceList: [],
+ refreshDeviceList() {
+ numRefreshes += 1;
+ return Promise.resolve(true);
+ },
+ },
+ _internal: {
+ now() {
+ return now;
+ },
+ },
+ };
+
+ try {
+ _("Syncing.");
+ await syncClientsEngine(server);
+ Assert.equal(numRefreshes, 1, "first sync should refresh");
+ now += 1000; // a second later.
+ await syncClientsEngine(server);
+ Assert.equal(numRefreshes, 1, "next sync should not refresh");
+ now += 60 * 60 * 2 * 1000; // 2 hours later
+ await syncClientsEngine(server);
+ Assert.equal(numRefreshes, 2, "2 hours later should refresh");
+ now += 1000; // a second later.
+ await syncClientsEngine(server);
+ Assert.equal(numRefreshes, 2, "next sync should not refresh");
+ } finally {
+ await cleanup();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_optional_client_fields() {
+ _("Ensure that we produce records with the fields added in Bug 1097222.");
+
+ const SUPPORTED_PROTOCOL_VERSIONS = ["1.5"];
+ let local = await engine._store.createRecord(engine.localID, "clients");
+ equal(local.name, engine.localName);
+ equal(local.type, engine.localType);
+ equal(local.version, Services.appinfo.version);
+ deepEqual(local.protocols, SUPPORTED_PROTOCOL_VERSIONS);
+
+ // Optional fields.
+ // Make sure they're what they ought to be...
+ equal(local.os, Services.appinfo.OS);
+ equal(local.appPackage, Services.appinfo.ID);
+
+ // ... and also that they're non-empty.
+ ok(!!local.os);
+ ok(!!local.appPackage);
+ ok(!!local.application);
+
+ // We don't currently populate device or formfactor.
+ // See Bug 1100722, Bug 1100723.
+
+ await cleanup();
+});
+
+add_task(async function test_merge_commands() {
+ _("Verifies local commands for remote clients are merged with the server's");
+
+ let now = new_timestamp();
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ let collection = server.getCollection("foo", "clients");
+
+ let desktopID = Utils.makeGUID();
+ collection.insertRecord(
+ {
+ id: desktopID,
+ name: "Desktop client",
+ type: "desktop",
+ commands: [
+ {
+ command: "wipeEngine",
+ args: ["history"],
+ flowID: Utils.makeGUID(),
+ },
+ ],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+
+ let mobileID = Utils.makeGUID();
+ collection.insertRecord(
+ {
+ id: mobileID,
+ name: "Mobile client",
+ type: "mobile",
+ commands: [
+ {
+ command: "logout",
+ args: [],
+ flowID: Utils.makeGUID(),
+ },
+ ],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+
+ try {
+ _("First sync. 2 records downloaded.");
+ strictEqual(engine.lastRecordUpload, 0);
+ ok(engine.isFirstSync);
+ await syncClientsEngine(server);
+
+ _("Broadcast logout to all clients");
+ await engine.sendCommand("logout", []);
+ await syncClientsEngine(server);
+
+ let desktopPayload = collection.cleartext(desktopID);
+ compareCommands(
+ desktopPayload.commands,
+ [
+ {
+ command: "wipeEngine",
+ args: ["history"],
+ },
+ {
+ command: "logout",
+ args: [],
+ },
+ ],
+ "Should send the logout command to the desktop client"
+ );
+
+ let mobilePayload = collection.cleartext(mobileID);
+ compareCommands(
+ mobilePayload.commands,
+ [{ command: "logout", args: [] }],
+ "Should not send a duplicate logout to the mobile client"
+ );
+ } finally {
+ await cleanup();
+
+ try {
+ server.deleteCollections("foo");
+ } finally {
+ await promiseStopServer(server);
+ }
+ }
+});
+
+add_task(async function test_duplicate_remote_commands() {
+ _(
+ "Verifies local commands for remote clients are sent only once (bug 1289287)"
+ );
+
+ let now = new_timestamp();
+ let server = await serverForFoo(engine);
+
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ let collection = server.getCollection("foo", "clients");
+
+ let desktopID = Utils.makeGUID();
+ collection.insertRecord(
+ {
+ id: desktopID,
+ name: "Desktop client",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+
+ try {
+ _("First sync. 1 record downloaded.");
+ strictEqual(engine.lastRecordUpload, 0);
+ ok(engine.isFirstSync);
+ await syncClientsEngine(server);
+
+ _("Send command to client to wipe history engine");
+ await engine.sendCommand("wipeEngine", ["history"]);
+ await syncClientsEngine(server);
+
+ _(
+ "Simulate the desktop client consuming the command and syncing to the server"
+ );
+ collection.insertRecord(
+ {
+ id: desktopID,
+ name: "Desktop client",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+
+ _("Send another command to the desktop client to wipe tabs engine");
+ await engine.sendCommand("wipeEngine", ["tabs"], desktopID);
+ await syncClientsEngine(server);
+
+ let desktopPayload = collection.cleartext(desktopID);
+ compareCommands(
+ desktopPayload.commands,
+ [
+ {
+ command: "wipeEngine",
+ args: ["tabs"],
+ },
+ ],
+ "Should only send the second command to the desktop client"
+ );
+ } finally {
+ await cleanup();
+
+ try {
+ server.deleteCollections("foo");
+ } finally {
+ await promiseStopServer(server);
+ }
+ }
+});
+
+add_task(async function test_upload_after_reboot() {
+ _("Multiple downloads, reboot, then upload (bug 1289287)");
+
+ let now = new_timestamp();
+ let server = await serverForFoo(engine);
+
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ let collection = server.getCollection("foo", "clients");
+
+ let deviceBID = Utils.makeGUID();
+ let deviceCID = Utils.makeGUID();
+ collection.insertRecord(
+ {
+ id: deviceBID,
+ name: "Device B",
+ type: "desktop",
+ commands: [
+ {
+ command: "wipeEngine",
+ args: ["history"],
+ flowID: Utils.makeGUID(),
+ },
+ ],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+ collection.insertRecord(
+ {
+ id: deviceCID,
+ name: "Device C",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+
+ try {
+ _("First sync. 2 records downloaded.");
+ strictEqual(engine.lastRecordUpload, 0);
+ ok(engine.isFirstSync);
+ await syncClientsEngine(server);
+
+ _("Send command to client to wipe tab engine");
+ await engine.sendCommand("wipeEngine", ["tabs"], deviceBID);
+
+ const oldUploadOutgoing = SyncEngine.prototype._uploadOutgoing;
+ SyncEngine.prototype._uploadOutgoing = async () =>
+ engine._onRecordsWritten([], [deviceBID]);
+ await syncClientsEngine(server);
+
+ let deviceBPayload = collection.cleartext(deviceBID);
+ compareCommands(
+ deviceBPayload.commands,
+ [
+ {
+ command: "wipeEngine",
+ args: ["history"],
+ },
+ ],
+ "Should be the same because the upload failed"
+ );
+
+ _("Simulate the client B consuming the command and syncing to the server");
+ collection.insertRecord(
+ {
+ id: deviceBID,
+ name: "Device B",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+
+ // Simulate reboot
+ SyncEngine.prototype._uploadOutgoing = oldUploadOutgoing;
+ engine = Service.clientsEngine = new ClientEngine(Service);
+ await engine.initialize();
+
+ await syncClientsEngine(server);
+
+ deviceBPayload = collection.cleartext(deviceBID);
+ compareCommands(
+ deviceBPayload.commands,
+ [
+ {
+ command: "wipeEngine",
+ args: ["tabs"],
+ },
+ ],
+ "Should only had written our outgoing command"
+ );
+ } finally {
+ await cleanup();
+
+ try {
+ server.deleteCollections("foo");
+ } finally {
+ await promiseStopServer(server);
+ }
+ }
+});
+
+add_task(async function test_keep_cleared_commands_after_reboot() {
+ _(
+ "Download commands, fail upload, reboot, then apply new commands (bug 1289287)"
+ );
+
+ let now = new_timestamp();
+ let server = await serverForFoo(engine);
+
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ let collection = server.getCollection("foo", "clients");
+
+ let deviceBID = Utils.makeGUID();
+ let deviceCID = Utils.makeGUID();
+ collection.insertRecord(
+ {
+ id: engine.localID,
+ name: "Device A",
+ type: "desktop",
+ commands: [
+ {
+ command: "wipeEngine",
+ args: ["history"],
+ flowID: Utils.makeGUID(),
+ },
+ {
+ command: "wipeEngine",
+ args: ["tabs"],
+ flowID: Utils.makeGUID(),
+ },
+ ],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+ collection.insertRecord(
+ {
+ id: deviceBID,
+ name: "Device B",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+ collection.insertRecord(
+ {
+ id: deviceCID,
+ name: "Device C",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+
+ try {
+ _("First sync. Download remote and our record.");
+ strictEqual(engine.lastRecordUpload, 0);
+ ok(engine.isFirstSync);
+
+ const oldUploadOutgoing = SyncEngine.prototype._uploadOutgoing;
+ SyncEngine.prototype._uploadOutgoing = async () =>
+ engine._onRecordsWritten([], [deviceBID]);
+ let commandsProcessed = 0;
+ engine.service.wipeClient = _engine => {
+ commandsProcessed++;
+ };
+
+ await syncClientsEngine(server);
+ await engine.processIncomingCommands(); // Not called by engine.sync(), so we call it ourselves.
+ equal(commandsProcessed, 2, "We processed 2 commands");
+
+ let localRemoteRecord = collection.cleartext(engine.localID);
+ compareCommands(
+ localRemoteRecord.commands,
+ [
+ {
+ command: "wipeEngine",
+ args: ["history"],
+ },
+ {
+ command: "wipeEngine",
+ args: ["tabs"],
+ },
+ ],
+ "Should be the same because the upload failed"
+ );
+
+ // Another client sends a wipe command
+ collection.insertRecord(
+ {
+ id: engine.localID,
+ name: "Device A",
+ type: "desktop",
+ commands: [
+ {
+ command: "wipeEngine",
+ args: ["history"],
+ flowID: Utils.makeGUID(),
+ },
+ {
+ command: "wipeEngine",
+ args: ["tabs"],
+ flowID: Utils.makeGUID(),
+ },
+ {
+ command: "wipeEngine",
+ args: ["bookmarks"],
+ flowID: Utils.makeGUID(),
+ },
+ ],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 5
+ );
+
+ // Simulate reboot
+ SyncEngine.prototype._uploadOutgoing = oldUploadOutgoing;
+ engine = Service.clientsEngine = new ClientEngine(Service);
+ await engine.initialize();
+
+ commandsProcessed = 0;
+ engine.service.wipeClient = _engine => {
+ commandsProcessed++;
+ };
+ await syncClientsEngine(server);
+ await engine.processIncomingCommands();
+ equal(
+ commandsProcessed,
+ 1,
+ "We processed one command (the other were cleared)"
+ );
+
+ localRemoteRecord = collection.cleartext(deviceBID);
+ deepEqual(localRemoteRecord.commands, [], "Should be empty");
+ } finally {
+ await cleanup();
+
+ // Reset service (remove mocks)
+ engine = Service.clientsEngine = new ClientEngine(Service);
+ await engine.initialize();
+ await engine._resetClient();
+
+ try {
+ server.deleteCollections("foo");
+ } finally {
+ await promiseStopServer(server);
+ }
+ }
+});
+
+add_task(async function test_deleted_commands() {
+ _("Verifies commands for a deleted client are discarded");
+
+ let now = new_timestamp();
+ let server = await serverForFoo(engine);
+
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ let collection = server.getCollection("foo", "clients");
+
+ let activeID = Utils.makeGUID();
+ collection.insertRecord(
+ {
+ id: activeID,
+ name: "Active client",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+
+ let deletedID = Utils.makeGUID();
+ collection.insertRecord(
+ {
+ id: deletedID,
+ name: "Client to delete",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ },
+ now - 10
+ );
+
+ try {
+ _("First sync. 2 records downloaded.");
+ await syncClientsEngine(server);
+
+ _("Delete a record on the server.");
+ collection.remove(deletedID);
+
+ _("Broadcast a command to all clients");
+ await engine.sendCommand("logout", []);
+ await syncClientsEngine(server);
+
+ deepEqual(
+ collection.keys().sort(),
+ [activeID, engine.localID].sort(),
+ "Should not reupload deleted clients"
+ );
+
+ let activePayload = collection.cleartext(activeID);
+ compareCommands(
+ activePayload.commands,
+ [{ command: "logout", args: [] }],
+ "Should send the command to the active client"
+ );
+ } finally {
+ await cleanup();
+
+ try {
+ server.deleteCollections("foo");
+ } finally {
+ await promiseStopServer(server);
+ }
+ }
+});
+
+add_task(async function test_command_sync_notify() {
+ _("Notify other clients when writing their record.");
+
+ await engine._store.wipe();
+ await generateNewKeys(Service.collectionKeys);
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.getCollection("foo", "clients");
+ let remoteId = Utils.makeGUID();
+ let remoteId2 = Utils.makeGUID();
+
+ _("Create remote client record 1");
+ collection.insertRecord({
+ id: remoteId,
+ name: "Remote client",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ });
+
+ _("Create remote client record 2");
+ collection.insertRecord({
+ id: remoteId2,
+ name: "Remote client 2",
+ type: "mobile",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ });
+
+ try {
+ equal(collection.count(), 2, "2 remote records written");
+ await syncClientsEngine(server);
+ equal(
+ collection.count(),
+ 3,
+ "3 remote records written (+1 for the synced local record)"
+ );
+
+ await engine.sendCommand("wipeEngine", ["tabs"]);
+ await engine._tracker.addChangedID(engine.localID);
+ const getClientFxaDeviceId = sinon
+ .stub(engine, "getClientFxaDeviceId")
+ .callsFake(id => "fxa-" + id);
+ const engineMock = sinon.mock(engine);
+ let _notifyCollectionChanged = engineMock
+ .expects("_notifyCollectionChanged")
+ .withArgs(["fxa-" + remoteId, "fxa-" + remoteId2]);
+ _("Syncing.");
+ await syncClientsEngine(server);
+ _notifyCollectionChanged.verify();
+
+ engineMock.restore();
+ getClientFxaDeviceId.restore();
+ } finally {
+ await cleanup();
+ await engine._tracker.clearChangedIDs();
+
+ try {
+ server.deleteCollections("foo");
+ } finally {
+ await promiseStopServer(server);
+ }
+ }
+});
+
+add_task(async function ensureSameFlowIDs() {
+ let events = [];
+ let origRecordTelemetryEvent = Service.recordTelemetryEvent;
+ Service.recordTelemetryEvent = (object, method, value, extra) => {
+ events.push({ object, method, value, extra });
+ };
+
+ let server = await serverForFoo(engine);
+ try {
+ // Set up 2 clients, send them a command, and ensure we get two events
+ // written, both with the same flowID.
+ await SyncTestingInfrastructure(server);
+ let collection = server.getCollection("foo", "clients");
+
+ let remoteId = Utils.makeGUID();
+ let remoteId2 = Utils.makeGUID();
+
+ _("Create remote client record 1");
+ collection.insertRecord({
+ id: remoteId,
+ name: "Remote client",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ });
+
+ _("Create remote client record 2");
+ collection.insertRecord({
+ id: remoteId2,
+ name: "Remote client 2",
+ type: "mobile",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ });
+
+ await syncClientsEngine(server);
+ await engine.sendCommand("wipeEngine", ["tabs"]);
+ await syncClientsEngine(server);
+ equal(events.length, 2);
+ // we don't know what the flowID is, but do know it should be the same.
+ equal(events[0].extra.flowID, events[1].extra.flowID);
+ // Clear the remote clients' commands so deduping doesn't stop us re-adding the command.
+ for (let client of Object.values(engine._store._remoteClients)) {
+ client.commands = [];
+ }
+ // check it's correctly used when we specify a flow ID
+ events.length = 0;
+ let flowID = Utils.makeGUID();
+ await engine.sendCommand("wipeEngine", ["tabs"], null, { flowID });
+ await syncClientsEngine(server);
+ equal(events.length, 2);
+ equal(events[0].extra.flowID, flowID);
+ equal(events[1].extra.flowID, flowID);
+
+ // Clear the remote clients' commands so deduping doesn't stop us re-adding the command.
+ for (let client of Object.values(engine._store._remoteClients)) {
+ client.commands = [];
+ }
+
+ // and that it works when something else is in "extra"
+ events.length = 0;
+ await engine.sendCommand("wipeEngine", ["tabs"], null, {
+ reason: "testing",
+ });
+ await syncClientsEngine(server);
+ equal(events.length, 2);
+ equal(events[0].extra.flowID, events[1].extra.flowID);
+ equal(events[0].extra.reason, "testing");
+ equal(events[1].extra.reason, "testing");
+ // Clear the remote clients' commands so deduping doesn't stop us re-adding the command.
+ for (let client of Object.values(engine._store._remoteClients)) {
+ client.commands = [];
+ }
+
+ // and when both are specified.
+ events.length = 0;
+ await engine.sendCommand("wipeEngine", ["tabs"], null, {
+ reason: "testing",
+ flowID,
+ });
+ await syncClientsEngine(server);
+ equal(events.length, 2);
+ equal(events[0].extra.flowID, flowID);
+ equal(events[1].extra.flowID, flowID);
+ equal(events[0].extra.reason, "testing");
+ equal(events[1].extra.reason, "testing");
+ // Clear the remote clients' commands so deduping doesn't stop us re-adding the command.
+ for (let client of Object.values(engine._store._remoteClients)) {
+ client.commands = [];
+ }
+ } finally {
+ Service.recordTelemetryEvent = origRecordTelemetryEvent;
+ await cleanup();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_duplicate_commands_telemetry() {
+ let events = [];
+ let origRecordTelemetryEvent = Service.recordTelemetryEvent;
+ Service.recordTelemetryEvent = (object, method, value, extra) => {
+ events.push({ object, method, value, extra });
+ };
+
+ let server = await serverForFoo(engine);
+ try {
+ await SyncTestingInfrastructure(server);
+ let collection = server.getCollection("foo", "clients");
+
+ let remoteId = Utils.makeGUID();
+ let remoteId2 = Utils.makeGUID();
+
+ _("Create remote client record 1");
+ collection.insertRecord({
+ id: remoteId,
+ name: "Remote client",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ });
+
+ _("Create remote client record 2");
+ collection.insertRecord({
+ id: remoteId2,
+ name: "Remote client 2",
+ type: "mobile",
+ commands: [],
+ version: "48",
+ protocols: ["1.5"],
+ });
+
+ await syncClientsEngine(server);
+ // Make sure deduping works before syncing
+ await engine.sendCommand("wipeEngine", ["history"], remoteId);
+ await engine.sendCommand("wipeEngine", ["history"], remoteId);
+ equal(events.length, 1);
+ await syncClientsEngine(server);
+ // And after syncing.
+ await engine.sendCommand("wipeEngine", ["history"], remoteId);
+ equal(events.length, 1);
+ // Ensure we aren't deduping commands to different clients
+ await engine.sendCommand("wipeEngine", ["history"], remoteId2);
+ equal(events.length, 2);
+ } finally {
+ Service.recordTelemetryEvent = origRecordTelemetryEvent;
+    await cleanup();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_other_clients_notified_on_first_sync() {
+ _(
+ "Ensure that other clients are notified when we upload our client record for the first time."
+ );
+
+ await engine.resetLastSync();
+ await engine._store.wipe();
+ await generateNewKeys(Service.collectionKeys);
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ const fxAccounts = engine.fxAccounts;
+ let calls = 0;
+ engine.fxAccounts = {
+ device: {
+ getLocalId() {
+ return fxAccounts.device.getLocalId();
+ },
+ getLocalName() {
+ return fxAccounts.device.getLocalName();
+ },
+ getLocalType() {
+ return fxAccounts.device.getLocalType();
+ },
+ },
+ notifyDevices() {
+ calls++;
+ return Promise.resolve(true);
+ },
+ _internal: {
+ now() {
+ return Date.now();
+ },
+ },
+ };
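+  // The mock keeps the real device-info getters but counts notifyDevices()
+  // calls, so we can assert the notification only fires on the first upload.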
+
+ try {
+ engine.lastRecordUpload = 0;
+ _("First sync, should notify other clients");
+ await syncClientsEngine(server);
+ equal(calls, 1);
+
+ _("Second sync, should not notify other clients");
+ await syncClientsEngine(server);
+ equal(calls, 1);
+ } finally {
+ engine.fxAccounts = fxAccounts;
+ cleanup();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(
+ async function device_disconnected_notification_updates_known_stale_clients() {
+ const spyUpdate = sinon.spy(engine, "updateKnownStaleClients");
+
+ Services.obs.notifyObservers(
+ null,
+ "fxaccounts:device_disconnected",
+ JSON.stringify({ isLocalDevice: false })
+ );
+ ok(spyUpdate.calledOnce, "updateKnownStaleClients should be called");
+ spyUpdate.resetHistory();
+
+ Services.obs.notifyObservers(
+ null,
+ "fxaccounts:device_disconnected",
+ JSON.stringify({ isLocalDevice: true })
+ );
+ ok(spyUpdate.notCalled, "updateKnownStaleClients should not be called");
+
+ spyUpdate.restore();
+ }
+);
+
+add_task(async function update_known_stale_clients() {
+ const makeFakeClient = id => ({ id, fxaDeviceId: `fxa-${id}` });
+ const clients = [
+ makeFakeClient("one"),
+ makeFakeClient("two"),
+ makeFakeClient("three"),
+ ];
+ const stubRemoteClients = sinon
+ .stub(engine._store, "_remoteClients")
+ .get(() => {
+ return clients;
+ });
+ const stubFetchFxADevices = sinon
+ .stub(engine, "_fetchFxADevices")
+ .callsFake(() => {
+ engine._knownStaleFxADeviceIds = ["fxa-one", "fxa-two"];
+ });
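+  // The stub reports the first two FxA device IDs as stale;
+  // updateKnownStaleClients should flag the matching clients and leave the
+  // third alone.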
+
+ engine._knownStaleFxADeviceIds = null;
+ await engine.updateKnownStaleClients();
+ ok(clients[0].stale);
+ ok(clients[1].stale);
+ ok(!clients[2].stale);
+
+ stubRemoteClients.restore();
+ stubFetchFxADevices.restore();
+});
+
+add_task(async function test_create_record_command_limit() {
+ await engine._store.wipe();
+ await generateNewKeys(Service.collectionKeys);
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ const fakeLimit = 4 * 1024;
+
+ let maxSizeStub = sinon
+ .stub(Service, "getMemcacheMaxRecordPayloadSize")
+ .callsFake(() => fakeLimit);
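+  // 4 KiB is far below the real payload limit, so a few hundred queued
+  // commands are enough to overflow it.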
+
+ let user = server.user("foo");
+ let remoteId = Utils.makeGUID();
+
+ _("Create remote client record");
+ user.collection("clients").insertRecord({
+ id: remoteId,
+ name: "Remote client",
+ type: "desktop",
+ commands: [],
+ version: "57",
+ protocols: ["1.5"],
+ });
+
+ try {
+ _("Initial sync.");
+ await syncClientsEngine(server);
+
+ _("Send a fairly sane number of commands.");
+
+ for (let i = 0; i < 5; ++i) {
+ await engine.sendCommand("wipeEngine", [`history: ${i}`], remoteId);
+ }
+
+ await syncClientsEngine(server);
+
+ _("Make sure they all fit and weren't dropped.");
+ let parsedServerRecord = user.collection("clients").cleartext(remoteId);
+
+ equal(parsedServerRecord.commands.length, 5);
+
+ await engine.sendCommand("wipeEngine", ["history"], remoteId);
+
+ _("Send a not-sane number of commands.");
+ // Much higher than the maximum number of commands we could actually fit.
+ for (let i = 0; i < 500; ++i) {
+ await engine.sendCommand("wipeEngine", [`tabs: ${i}`], remoteId);
+ }
+
+ await syncClientsEngine(server);
+
+ _("Ensure we didn't overflow the server limit.");
+ let wbo = user.collection("clients").wbo(remoteId);
+ less(wbo.payload.length, fakeLimit);
+
+ _(
+ "And that the data we uploaded is both sane json and containing some commands."
+ );
+ let remoteCommands = wbo.getCleartext().commands;
+ greater(remoteCommands.length, 2);
+ let firstCommand = remoteCommands[0];
+ _(
+ "The first command should still be present, since it had a high priority"
+ );
+ equal(firstCommand.command, "wipeEngine");
+ _("And the last command in the list should be the last command we sent.");
+ let lastCommand = remoteCommands[remoteCommands.length - 1];
+ equal(lastCommand.command, "wipeEngine");
+ deepEqual(lastCommand.args, ["tabs: 499"]);
+ } finally {
+ maxSizeStub.restore();
+ await cleanup();
+ try {
+ let collection = server.getCollection("foo", "clients");
+ collection.remove(remoteId);
+ } finally {
+ await promiseStopServer(server);
+ }
+ }
+});
diff --git a/services/sync/tests/unit/test_clients_escape.js b/services/sync/tests/unit/test_clients_escape.js
new file mode 100644
index 0000000000..53ec7fd7a9
--- /dev/null
+++ b/services/sync/tests/unit/test_clients_escape.js
@@ -0,0 +1,59 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+add_task(async function test_clients_escape() {
+ _("Set up test fixtures.");
+
+ await configureIdentity();
+ let keyBundle = Service.identity.syncKeyBundle;
+
+ let engine = Service.clientsEngine;
+
+ try {
+ _("Test that serializing client records results in uploadable ascii");
+ engine.localID = "ascii";
+ engine.localName = "wéävê";
+
+ _("Make sure we have the expected record");
+ let record = await engine._createRecord("ascii");
+ Assert.equal(record.id, "ascii");
+ Assert.equal(record.name, "wéävê");
+
+ _("Encrypting record...");
+ await record.encrypt(keyBundle);
+ _("Encrypted.");
+
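+    // After encryption the payload is base64 ciphertext plus ASCII metadata,
+    // so serializing the whole record should yield pure ASCII.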
+ let serialized = JSON.stringify(record);
+ let checkCount = 0;
+ _("Checking for all ASCII:", serialized);
+ for (let ch of serialized) {
+ let code = ch.charCodeAt(0);
+ _("Checking asciiness of '", ch, "'=", code);
+ Assert.ok(code < 128);
+ checkCount++;
+ }
+
+ _("Processed", checkCount, "characters out of", serialized.length);
+ Assert.equal(checkCount, serialized.length);
+
+ _("Making sure the record still looks like it did before");
+ await record.decrypt(keyBundle);
+ Assert.equal(record.id, "ascii");
+ Assert.equal(record.name, "wéävê");
+
+ _("Sanity check that creating the record also gives the same");
+ record = await engine._createRecord("ascii");
+ Assert.equal(record.id, "ascii");
+ Assert.equal(record.name, "wéävê");
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ }
+});
diff --git a/services/sync/tests/unit/test_collection_getBatched.js b/services/sync/tests/unit/test_collection_getBatched.js
new file mode 100644
index 0000000000..f5425abe92
--- /dev/null
+++ b/services/sync/tests/unit/test_collection_getBatched.js
@@ -0,0 +1,191 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Collection, WBORecord } = ChromeUtils.importESModule(
+ "resource://services-sync/record.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+function recordRange(lim, offset, total) {
+ let res = [];
+ for (let i = offset; i < Math.min(lim + offset, total); ++i) {
+ res.push({ id: String(i), payload: "test:" + i });
+ }
+ return res;
+}
+
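+// Builds a Collection whose get() is stubbed out: it fabricates paginated
+// responses locally and records every request, so getBatched() can be
+// exercised without a real server. throwAfter injects a network error on
+// the Nth request; interruptedAfter turns the Nth response into a 412.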
+function get_test_collection_info({
+ totalRecords,
+ batchSize,
+ lastModified,
+ throwAfter = Infinity,
+ interruptedAfter = Infinity,
+}) {
+ let coll = new Collection("http://example.com/test/", WBORecord, Service);
+ coll.full = true;
+ let requests = [];
+ let responses = [];
+ coll.get = async function () {
+ let limit = +this.limit;
+ let offset = 0;
+ if (this.offset) {
+ equal(this.offset.slice(0, 6), "foobar");
+ offset = +this.offset.slice(6);
+ }
+ requests.push({
+ limit,
+ offset,
+ spec: this.spec,
+ headers: Object.assign({}, this.headers),
+ });
+ if (--throwAfter === 0) {
+ throw new Error("Some Network Error");
+ }
+ let body = recordRange(limit, offset, totalRecords);
+ let response = {
+ obj: body,
+ success: true,
+ status: 200,
+ headers: {},
+ };
+ if (--interruptedAfter === 0) {
+ response.success = false;
+ response.status = 412;
+ response.body = "";
+ } else if (offset + limit < totalRecords) {
+ // Ensure we're treating this as an opaque string, since the docs say
+ // it might not be numeric.
+ response.headers["x-weave-next-offset"] = "foobar" + (offset + batchSize);
+ }
+ response.headers["x-last-modified"] = lastModified;
+ responses.push(response);
+ return response;
+ };
+ return { responses, requests, coll };
+}
+
+add_task(async function test_success() {
+ const totalRecords = 11;
+ const batchSize = 2;
+ const lastModified = "111111";
+ let { responses, requests, coll } = get_test_collection_info({
+ totalRecords,
+ batchSize,
+ lastModified,
+ });
+ let { response, records } = await coll.getBatched(batchSize);
+
+ equal(requests.length, Math.ceil(totalRecords / batchSize));
+
+ equal(records.length, totalRecords);
+ checkRecordsOrder(records);
+
+ // ensure we're returning the last response
+ equal(responses[responses.length - 1], response);
+
+  // Check the first request separately, since it's a bit of a special case.
+ ok(!requests[0].headers["x-if-unmodified-since"]);
+ ok(!requests[0].offset);
+ equal(requests[0].limit, batchSize);
+ let expectedOffset = 2;
+ for (let i = 1; i < requests.length; ++i) {
+ let req = requests[i];
+ equal(req.headers["x-if-unmodified-since"], lastModified);
+ equal(req.limit, batchSize);
+ if (i !== requests.length - 1) {
+ equal(req.offset, expectedOffset);
+ }
+
+ expectedOffset += batchSize;
+ }
+
+ // ensure we cleaned up anything that would break further
+ // use of this collection.
+ ok(!coll._headers["x-if-unmodified-since"]);
+ ok(!coll.offset);
+ ok(!coll.limit || coll.limit == Infinity);
+});
+
+add_task(async function test_total_limit() {
+ _("getBatched respects the (initial) value of the limit property");
+ const totalRecords = 100;
+ const recordLimit = 11;
+ const batchSize = 2;
+ const lastModified = "111111";
+ let { requests, coll } = get_test_collection_info({
+ totalRecords,
+ batchSize,
+ lastModified,
+ });
+ coll.limit = recordLimit;
+ let { records } = await coll.getBatched(batchSize);
+ checkRecordsOrder(records);
+
+ equal(requests.length, Math.ceil(recordLimit / batchSize));
+ equal(records.length, recordLimit);
+
+ for (let i = 0; i < requests.length; ++i) {
+ let req = requests[i];
+ if (i !== requests.length - 1) {
+ equal(req.limit, batchSize);
+ } else {
+ equal(req.limit, recordLimit % batchSize);
+ }
+ }
+
+ equal(coll._limit, recordLimit);
+});
+
+add_task(async function test_412() {
+ _("We shouldn't record records if we get a 412 in the middle of a batch");
+ const totalRecords = 11;
+ const batchSize = 2;
+ const lastModified = "111111";
+ let { responses, requests, coll } = get_test_collection_info({
+ totalRecords,
+ batchSize,
+ lastModified,
+ interruptedAfter: 3,
+ });
+ let { response, records } = await coll.getBatched(batchSize);
+
+ equal(requests.length, 3);
+ equal(records.length, 0); // we should not get any records
+
+ // ensure we're returning the last response
+ equal(responses[responses.length - 1], response);
+
+ ok(!response.success);
+ equal(response.status, 412);
+});
+
+add_task(async function test_get_throws() {
+ _("getBatched() should throw if a get() throws");
+ const totalRecords = 11;
+ const batchSize = 2;
+ const lastModified = "111111";
+ let { requests, coll } = get_test_collection_info({
+ totalRecords,
+ batchSize,
+ lastModified,
+ throwAfter: 3,
+ });
+
+ await Assert.rejects(coll.getBatched(batchSize), /Some Network Error/);
+
+ equal(requests.length, 3);
+});
+
+function checkRecordsOrder(records) {
+ ok(!!records.length);
+ for (let i = 0; i < records.length; i++) {
+ equal(records[i].id, String(i));
+ equal(records[i].payload, "test:" + i);
+ }
+}
diff --git a/services/sync/tests/unit/test_collections_recovery.js b/services/sync/tests/unit/test_collections_recovery.js
new file mode 100644
index 0000000000..fd923bc272
--- /dev/null
+++ b/services/sync/tests/unit/test_collections_recovery.js
@@ -0,0 +1,97 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// Verify that we wipe the server if we have to regenerate keys.
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+add_task(async function test_missing_crypto_collection() {
+ enableValidationPrefs();
+
+ let johnHelper = track_collections_helper();
+ let johnU = johnHelper.with_updated_collection;
+ let johnColls = johnHelper.collections;
+
+ let empty = false;
+ function maybe_empty(handler) {
+ return function (request, response) {
+ if (empty) {
+ let body = "{}";
+ response.setStatusLine(request.httpVersion, 200, "OK");
+ response.bodyOutputStream.write(body, body.length);
+ } else {
+ handler(request, response);
+ }
+ };
+ }
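+  // While `empty` is set, info/collections answers with an empty body,
+  // simulating a server that has lost track of its collections.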
+
+ let handlers = {
+ "/1.1/johndoe/info/collections": maybe_empty(johnHelper.handler),
+ "/1.1/johndoe/storage/crypto/keys": johnU(
+ "crypto",
+ new ServerWBO("keys").handler()
+ ),
+ "/1.1/johndoe/storage/meta/global": johnU(
+ "meta",
+ new ServerWBO("global").handler()
+ ),
+ };
+ let collections = [
+ "clients",
+ "bookmarks",
+ "forms",
+ "history",
+ "passwords",
+ "prefs",
+ "tabs",
+ ];
+ // Disable addon sync because AddonManager won't be initialized here.
+ await Service.engineManager.unregister("addons");
+ await Service.engineManager.unregister("extension-storage");
+
+ for (let coll of collections) {
+ handlers["/1.1/johndoe/storage/" + coll] = johnU(
+ coll,
+ new ServerCollection({}, true).handler()
+ );
+ }
+ let server = httpd_setup(handlers);
+ await configureIdentity({ username: "johndoe" }, server);
+
+ try {
+ let fresh = 0;
+ let orig = Service._freshStart;
+ Service._freshStart = async function () {
+ _("Called _freshStart.");
+ await orig.call(Service);
+ fresh++;
+ };
+
+ _("Startup, no meta/global: freshStart called once.");
+ await sync_and_validate_telem();
+ Assert.equal(fresh, 1);
+ fresh = 0;
+
+ _("Regular sync: no need to freshStart.");
+ await Service.sync();
+ Assert.equal(fresh, 0);
+
+ _("Simulate a bad info/collections.");
+ delete johnColls.crypto;
+ await sync_and_validate_telem();
+ Assert.equal(fresh, 1);
+ fresh = 0;
+
+ _("Regular sync: no need to freshStart.");
+ await sync_and_validate_telem();
+ Assert.equal(fresh, 0);
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ await promiseStopServer(server);
+ }
+});
diff --git a/services/sync/tests/unit/test_corrupt_keys.js b/services/sync/tests/unit/test_corrupt_keys.js
new file mode 100644
index 0000000000..06d7335985
--- /dev/null
+++ b/services/sync/tests/unit/test_corrupt_keys.js
@@ -0,0 +1,251 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Weave } = ChromeUtils.importESModule(
+ "resource://services-sync/main.sys.mjs"
+);
+const { HistoryEngine } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/history.sys.mjs"
+);
+const { CryptoWrapper, WBORecord } = ChromeUtils.importESModule(
+ "resource://services-sync/record.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+add_task(async function test_locally_changed_keys() {
+ enableValidationPrefs();
+
+ let hmacErrorCount = 0;
+ function counting(f) {
+ return async function () {
+ hmacErrorCount++;
+ return f.call(this);
+ };
+ }
+
+ Service.handleHMACEvent = counting(Service.handleHMACEvent);
+
+ let server = new SyncServer();
+ let johndoe = server.registerUser("johndoe", "password");
+ johndoe.createContents({
+ meta: {},
+ crypto: {},
+ clients: {},
+ });
+ server.start();
+
+ try {
+ Svc.PrefBranch.setStringPref("registerEngines", "Tab");
+
+ await configureIdentity({ username: "johndoe" }, server);
+ // We aren't doing a .login yet, so fudge the cluster URL.
+ Service.clusterURL = Service.identity._token.endpoint;
+
+ await Service.engineManager.register(HistoryEngine);
+ // Disable addon sync because AddonManager won't be initialized here.
+ await Service.engineManager.unregister("addons");
+ await Service.engineManager.unregister("extension-storage");
+
+ async function corrupt_local_keys() {
+ Service.collectionKeys._default.keyPair = [
+ await Weave.Crypto.generateRandomKey(),
+ await Weave.Crypto.generateRandomKey(),
+ ];
+ }
+
+ _("Setting meta.");
+
+ // Bump version on the server.
+ let m = new WBORecord("meta", "global");
+ m.payload = {
+ syncID: "foooooooooooooooooooooooooo",
+ storageVersion: STORAGE_VERSION,
+ };
+ await m.upload(Service.resource(Service.metaURL));
+
+ _(
+ "New meta/global: " +
+ JSON.stringify(johndoe.collection("meta").wbo("global"))
+ );
+
+ // Upload keys.
+ await generateNewKeys(Service.collectionKeys);
+ let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
+ await serverKeys.encrypt(Service.identity.syncKeyBundle);
+ Assert.ok(
+ (await serverKeys.upload(Service.resource(Service.cryptoKeysURL))).success
+ );
+
+ // Check that login works.
+ Assert.ok(await Service.login());
+ Assert.ok(Service.isLoggedIn);
+
+ // Sync should upload records.
+ await sync_and_validate_telem();
+
+ // Tabs exist.
+ _("Tabs modified: " + johndoe.modified("tabs"));
+ Assert.ok(johndoe.modified("tabs") > 0);
+
+ // Let's create some server side history records.
+ let liveKeys = Service.collectionKeys.keyForCollection("history");
+ _("Keys now: " + liveKeys.keyPair);
+ let visitType = Ci.nsINavHistoryService.TRANSITION_LINK;
+ let history = johndoe.createCollection("history");
+ for (let i = 0; i < 5; i++) {
+ let id = "record-no--" + i;
+ let modified = Date.now() / 1000 - 60 * (i + 10);
+
+ let w = new CryptoWrapper("history", "id");
+ w.cleartext = {
+ id,
+ histUri: "http://foo/bar?" + id,
+ title: id,
+ sortindex: i,
+ visits: [{ date: (modified - 5) * 1000000, type: visitType }],
+ deleted: false,
+ };
+ await w.encrypt(liveKeys);
+
+ let payload = { ciphertext: w.ciphertext, IV: w.IV, hmac: w.hmac };
+ history.insert(id, payload, modified);
+ }
+
+ history.timestamp = Date.now() / 1000;
+ let old_key_time = johndoe.modified("crypto");
+ _("Old key time: " + old_key_time);
+
+ // Check that we can decrypt one.
+ let rec = new CryptoWrapper("history", "record-no--0");
+ await rec.fetch(
+ Service.resource(Service.storageURL + "history/record-no--0")
+ );
+ _(JSON.stringify(rec));
+ Assert.ok(!!(await rec.decrypt(liveKeys)));
+
+ Assert.equal(hmacErrorCount, 0);
+
+ // Fill local key cache with bad data.
+ await corrupt_local_keys();
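+    // The server-side records are still encrypted with the old keys, so the
+    // next download should fail HMAC verification exactly once and trigger a
+    // fresh key fetch.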
+ _(
+ "Keys now: " + Service.collectionKeys.keyForCollection("history").keyPair
+ );
+
+ Assert.equal(hmacErrorCount, 0);
+
+ _("HMAC error count: " + hmacErrorCount);
+ // Now syncing should succeed, after one HMAC error.
+ await sync_and_validate_telem(ping => {
+ Assert.equal(
+ ping.engines.find(e => e.name == "history").incoming.applied,
+ 5
+ );
+ });
+
+ Assert.equal(hmacErrorCount, 1);
+ _(
+ "Keys now: " + Service.collectionKeys.keyForCollection("history").keyPair
+ );
+
+ // And look! We downloaded history!
+ Assert.ok(
+ await PlacesUtils.history.hasVisits("http://foo/bar?record-no--0")
+ );
+ Assert.ok(
+ await PlacesUtils.history.hasVisits("http://foo/bar?record-no--1")
+ );
+ Assert.ok(
+ await PlacesUtils.history.hasVisits("http://foo/bar?record-no--2")
+ );
+ Assert.ok(
+ await PlacesUtils.history.hasVisits("http://foo/bar?record-no--3")
+ );
+ Assert.ok(
+ await PlacesUtils.history.hasVisits("http://foo/bar?record-no--4")
+ );
+ Assert.equal(hmacErrorCount, 1);
+
+ _("Busting some new server values.");
+ // Now what happens if we corrupt the HMAC on the server?
+ for (let i = 5; i < 10; i++) {
+ let id = "record-no--" + i;
+ let modified = 1 + Date.now() / 1000;
+
+ let w = new CryptoWrapper("history", "id");
+ w.cleartext = {
+ id,
+ histUri: "http://foo/bar?" + id,
+ title: id,
+ sortindex: i,
+ visits: [{ date: (modified - 5) * 1000000, type: visitType }],
+ deleted: false,
+ };
+ await w.encrypt(Service.collectionKeys.keyForCollection("history"));
+ w.hmac = w.hmac.toUpperCase();
+
+ let payload = { ciphertext: w.ciphertext, IV: w.IV, hmac: w.hmac };
+ history.insert(id, payload, modified);
+ }
+ history.timestamp = Date.now() / 1000;
+
+ _("Server key time hasn't changed.");
+ Assert.equal(johndoe.modified("crypto"), old_key_time);
+
+ _("Resetting HMAC error timer.");
+ Service.lastHMACEvent = 0;
+
+ _("Syncing...");
+ await sync_and_validate_telem(ping => {
+ Assert.equal(
+ ping.engines.find(e => e.name == "history").incoming.failed,
+ 5
+ );
+ });
+
+ _(
+ "Keys now: " + Service.collectionKeys.keyForCollection("history").keyPair
+ );
+ _(
+ "Server keys have been updated, and we skipped over 5 more HMAC errors without adjusting history."
+ );
+ Assert.ok(johndoe.modified("crypto") > old_key_time);
+ Assert.equal(hmacErrorCount, 6);
+ Assert.equal(
+ false,
+ await PlacesUtils.history.hasVisits("http://foo/bar?record-no--5")
+ );
+ Assert.equal(
+ false,
+ await PlacesUtils.history.hasVisits("http://foo/bar?record-no--6")
+ );
+ Assert.equal(
+ false,
+ await PlacesUtils.history.hasVisits("http://foo/bar?record-no--7")
+ );
+ Assert.equal(
+ false,
+ await PlacesUtils.history.hasVisits("http://foo/bar?record-no--8")
+ );
+ Assert.equal(
+ false,
+ await PlacesUtils.history.hasVisits("http://foo/bar?record-no--9")
+ );
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ await promiseStopServer(server);
+ }
+});
+
+function run_test() {
+ Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+ validate_all_future_pings();
+
+ run_next_test();
+}
diff --git a/services/sync/tests/unit/test_declined.js b/services/sync/tests/unit/test_declined.js
new file mode 100644
index 0000000000..af7f8eb8c5
--- /dev/null
+++ b/services/sync/tests/unit/test_declined.js
@@ -0,0 +1,195 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { DeclinedEngines } = ChromeUtils.importESModule(
+ "resource://services-sync/stages/declined.sys.mjs"
+);
+const { EngineSynchronizer } = ChromeUtils.importESModule(
+ "resource://services-sync/stages/enginesync.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { Observers } = ChromeUtils.importESModule(
+ "resource://services-common/observers.sys.mjs"
+);
+
+function PetrolEngine() {}
+PetrolEngine.prototype.name = "petrol";
+
+function DieselEngine() {}
+DieselEngine.prototype.name = "diesel";
+
+function DummyEngine() {}
+DummyEngine.prototype.name = "dummy";
+
+function ActualEngine() {}
+ActualEngine.prototype.name = "actual";
+Object.setPrototypeOf(ActualEngine.prototype, SyncEngine.prototype);
+
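+// A throwaway EngineManager wired up with fake engines so declined-engine
+// bookkeeping can be tested without real engine registrations.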
+function getEngineManager() {
+ let manager = new EngineManager(Service);
+ Service.engineManager = manager;
+ manager._engines = {
+ petrol: new PetrolEngine(),
+ diesel: new DieselEngine(),
+ dummy: new DummyEngine(),
+ actual: new ActualEngine(),
+ };
+ return manager;
+}
+
+/**
+ * 'Fetch' a meta/global record that doesn't mention declined.
+ *
+ * Push it into the EngineSynchronizer to set enabled; verify that those are
+ * correct.
+ *
+ * Then push it into DeclinedEngines to set declined; verify that none are
+ * declined, and a notification is sent for our locally disabled-but-not-
+ * declined engines.
+ */
+add_task(async function testOldMeta() {
+ let meta = {
+ payload: {
+ engines: {
+ petrol: 1,
+ diesel: 2,
+ nonlocal: 3, // Enabled but not supported.
+ },
+ },
+ };
+
+ _("Record: " + JSON.stringify(meta));
+
+ let manager = getEngineManager();
+
+ // Update enabled from meta/global.
+ let engineSync = new EngineSynchronizer(Service);
+ await engineSync._updateEnabledFromMeta(meta, 3, manager);
+
+ Assert.ok(manager._engines.petrol.enabled, "'petrol' locally enabled.");
+ Assert.ok(manager._engines.diesel.enabled, "'diesel' locally enabled.");
+ Assert.ok(
+ !("nonlocal" in manager._engines),
+ "We don't know anything about the 'nonlocal' engine."
+ );
+ Assert.ok(!manager._engines.actual.enabled, "'actual' not locally enabled.");
+ Assert.ok(!manager.isDeclined("actual"), "'actual' not declined, though.");
+
+ let declinedEngines = new DeclinedEngines(Service);
+
+ function onNotDeclined(subject, topic, data) {
+ Observers.remove("weave:engines:notdeclined", onNotDeclined);
+ Assert.ok(
+ subject.undecided.has("actual"),
+ "EngineManager observed that 'actual' was undecided."
+ );
+
+ let declined = manager.getDeclined();
+ _("Declined: " + JSON.stringify(declined));
+
+ Assert.ok(!meta.changed, "No need to upload a new meta/global.");
+ }
+
+ Observers.add("weave:engines:notdeclined", onNotDeclined);
+
+ declinedEngines.updateDeclined(meta, manager);
+});
+
+/**
+ * 'Fetch' a meta/global that declines an engine we don't
+ * recognize. Ensure that we track that declined engine along
+ * with any we locally declined, and that the meta/global
+ * record is marked as changed and includes all declined
+ * engines.
+ */
+add_task(async function testDeclinedMeta() {
+ let meta = {
+ payload: {
+ engines: {
+ petrol: 1,
+ diesel: 2,
+ nonlocal: 3, // Enabled but not supported.
+ },
+ declined: ["nonexistent"], // Declined and not supported.
+ },
+ };
+
+ _("Record: " + JSON.stringify(meta));
+
+ let manager = getEngineManager();
+ manager._engines.petrol.enabled = true;
+ manager._engines.diesel.enabled = true;
+ manager._engines.dummy.enabled = true;
+ manager._engines.actual.enabled = false; // Disabled but not declined.
+
+ manager.decline(["localdecline"]); // Declined and not supported.
+
+ let declinedEngines = new DeclinedEngines(Service);
+
+ function onNotDeclined(subject, topic, data) {
+ Observers.remove("weave:engines:notdeclined", onNotDeclined);
+ Assert.ok(
+ subject.undecided.has("actual"),
+ "EngineManager observed that 'actual' was undecided."
+ );
+
+ let declined = manager.getDeclined();
+ _("Declined: " + JSON.stringify(declined));
+
+ Assert.equal(
+ declined.indexOf("actual"),
+ -1,
+ "'actual' is locally disabled, but not marked as declined."
+ );
+
+ Assert.equal(
+ declined.indexOf("clients"),
+ -1,
+ "'clients' is enabled and not remotely declined."
+ );
+ Assert.equal(
+ declined.indexOf("petrol"),
+ -1,
+ "'petrol' is enabled and not remotely declined."
+ );
+ Assert.equal(
+ declined.indexOf("diesel"),
+ -1,
+ "'diesel' is enabled and not remotely declined."
+ );
+ Assert.equal(
+ declined.indexOf("dummy"),
+ -1,
+ "'dummy' is enabled and not remotely declined."
+ );
+
+ Assert.ok(
+ 0 <= declined.indexOf("nonexistent"),
+ "'nonexistent' was declined on the server."
+ );
+
+ Assert.ok(
+ 0 <= declined.indexOf("localdecline"),
+ "'localdecline' was declined locally."
+ );
+
+ // The meta/global is modified, too.
+ Assert.ok(
+ 0 <= meta.payload.declined.indexOf("nonexistent"),
+ "meta/global's declined contains 'nonexistent'."
+ );
+ Assert.ok(
+ 0 <= meta.payload.declined.indexOf("localdecline"),
+ "meta/global's declined contains 'localdecline'."
+ );
+ Assert.strictEqual(true, meta.changed, "meta/global was changed.");
+ }
+
+ Observers.add("weave:engines:notdeclined", onNotDeclined);
+
+ declinedEngines.updateDeclined(meta, manager);
+});
diff --git a/services/sync/tests/unit/test_disconnect_shutdown.js b/services/sync/tests/unit/test_disconnect_shutdown.js
new file mode 100644
index 0000000000..0606c93a7d
--- /dev/null
+++ b/services/sync/tests/unit/test_disconnect_shutdown.js
@@ -0,0 +1,102 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+const { SyncDisconnect, SyncDisconnectInternal } = ChromeUtils.importESModule(
+ "resource://services-sync/SyncDisconnect.sys.mjs"
+);
+const { AsyncShutdown } = ChromeUtils.importESModule(
+ "resource://gre/modules/AsyncShutdown.sys.mjs"
+);
+const { PREF_LAST_FXA_USER } = ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccountsCommon.sys.mjs"
+);
+
+add_task(async function test_shutdown_blocker() {
+ let spySignout = sinon.stub(
+ SyncDisconnectInternal,
+ "doSyncAndAccountDisconnect"
+ );
+
+ // We don't need to check for the lock regularly as we end up aborting the wait.
+ SyncDisconnectInternal.lockRetryInterval = 1000;
+ // Force the retry count to a very large value - this test should never
+ // abort due to the retry count and we want the test to fail (aka timeout)
+ // should our abort code not work.
+ SyncDisconnectInternal.lockRetryCount = 10000;
+ // mock the "browser" sanitize function - it should not be called by
+ // this test.
+ let spyBrowser = sinon.stub(SyncDisconnectInternal, "doSanitizeBrowserData");
+ // mock Sync
+ let mockEngine1 = {
+ enabled: true,
+ name: "Test Engine 1",
+ wipeClient: sinon.spy(),
+ };
+ let mockEngine2 = {
+ enabled: false,
+ name: "Test Engine 2",
+ wipeClient: sinon.spy(),
+ };
+
+ // This weave mock never gives up the lock.
+ let Weave = {
+ Service: {
+ enabled: true,
+ lock: () => false, // so we never get the lock.
+ unlock: sinon.spy(),
+
+ engineManager: {
+ getAll: sinon.stub().returns([mockEngine1, mockEngine2]),
+ },
+ errorHandler: {
+ resetFileLog: sinon.spy(),
+ },
+ },
+ };
+ let weaveStub = sinon.stub(SyncDisconnectInternal, "getWeave");
+ weaveStub.returns(Weave);
+
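+  // Base64 of "test@example.com"; SyncDisconnect should clear this pref.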
+ Services.prefs.setStringPref(PREF_LAST_FXA_USER, "dGVzdEBleGFtcGxlLmNvbQ==");
+
+ let promiseDisconnected = SyncDisconnect.disconnect(true);
+
+ // Pretend we hit the shutdown blocker.
+ info("simulating quitApplicationGranted");
+ Services.prefs.setBoolPref("toolkit.asyncshutdown.testing", true);
+ AsyncShutdown.quitApplicationGranted._trigger();
+ Services.prefs.clearUserPref("toolkit.asyncshutdown.testing");
+
+ info("waiting for disconnect to complete");
+ await promiseDisconnected;
+
+ Assert.ok(
+ !Services.prefs.prefHasUserValue(PREF_LAST_FXA_USER),
+ "Should have reset different user warning pref"
+ );
+ Assert.equal(
+ Weave.Service.unlock.callCount,
+ 0,
+ "should not have unlocked at the end"
+ );
+ Assert.ok(!Weave.Service.enabled, "Weave should be and remain disabled");
+ Assert.equal(
+ Weave.Service.errorHandler.resetFileLog.callCount,
+ 1,
+ "should have reset the log"
+ );
+ Assert.equal(
+ mockEngine1.wipeClient.callCount,
+ 1,
+ "enabled engine should have been wiped"
+ );
+ Assert.equal(
+ mockEngine2.wipeClient.callCount,
+ 0,
+ "disabled engine should not have been wiped"
+ );
+  Assert.equal(spyBrowser.callCount, 1, "should have sanitized the browser");
+ Assert.equal(spySignout.callCount, 1, "should have signed out of FxA");
+});
diff --git a/services/sync/tests/unit/test_engine.js b/services/sync/tests/unit/test_engine.js
new file mode 100644
index 0000000000..31a08d5bc9
--- /dev/null
+++ b/services/sync/tests/unit/test_engine.js
@@ -0,0 +1,248 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Observers } = ChromeUtils.importESModule(
+ "resource://services-common/observers.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+function SteamStore(engine) {
+ Store.call(this, "Steam", engine);
+ this.wasWiped = false;
+}
+SteamStore.prototype = {
+ async wipe() {
+ this.wasWiped = true;
+ },
+};
+Object.setPrototypeOf(SteamStore.prototype, Store.prototype);
+
+function SteamTracker(name, engine) {
+ LegacyTracker.call(this, name || "Steam", engine);
+}
+Object.setPrototypeOf(SteamTracker.prototype, LegacyTracker.prototype);
+
+function SteamEngine(name, service) {
+ SyncEngine.call(this, name, service);
+ this.wasReset = false;
+ this.wasSynced = false;
+}
+SteamEngine.prototype = {
+ _storeObj: SteamStore,
+ _trackerObj: SteamTracker,
+
+ async _resetClient() {
+ this.wasReset = true;
+ },
+
+ async _sync() {
+ this.wasSynced = true;
+ },
+};
+Object.setPrototypeOf(SteamEngine.prototype, SyncEngine.prototype);
+
+var engineObserver = {
+ topics: [],
+
+ observe(subject, topic, data) {
+ Assert.equal(data, "steam");
+ this.topics.push(topic);
+ },
+
+ reset() {
+ this.topics = [];
+ },
+};
+Observers.add("weave:engine:reset-client:start", engineObserver);
+Observers.add("weave:engine:reset-client:finish", engineObserver);
+Observers.add("weave:engine:wipe-client:start", engineObserver);
+Observers.add("weave:engine:wipe-client:finish", engineObserver);
+Observers.add("weave:engine:sync:start", engineObserver);
+Observers.add("weave:engine:sync:finish", engineObserver);
+
+async function cleanup(engine) {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ engine.wasReset = false;
+ engine.wasSynced = false;
+ engineObserver.reset();
+ await engine._tracker.clearChangedIDs();
+ await engine.finalize();
+}
+
+add_task(async function test_members() {
+ _("Engine object members");
+ let engine = new SteamEngine("Steam", Service);
+ await engine.initialize();
+ Assert.equal(engine.Name, "Steam");
+ Assert.equal(engine.prefName, "steam");
+ Assert.ok(engine._store instanceof SteamStore);
+ Assert.ok(engine._tracker instanceof SteamTracker);
+});
+
+add_task(async function test_score() {
+ _("Engine.score corresponds to tracker.score and is readonly");
+ let engine = new SteamEngine("Steam", Service);
+ await engine.initialize();
+ Assert.equal(engine.score, 0);
+ engine._tracker.score += 5;
+ Assert.equal(engine.score, 5);
+
+ try {
+ engine.score = 10;
+ } catch (ex) {
+ // Setting an attribute that has a getter produces an error in
+ // Firefox <= 3.6 and is ignored in later versions. Either way,
+ // the attribute's value won't change.
+ }
+ Assert.equal(engine.score, 5);
+});
+
+add_task(async function test_resetClient() {
+ _("Engine.resetClient calls _resetClient");
+ let engine = new SteamEngine("Steam", Service);
+ await engine.initialize();
+ Assert.ok(!engine.wasReset);
+
+ await engine.resetClient();
+ Assert.ok(engine.wasReset);
+ Assert.equal(engineObserver.topics[0], "weave:engine:reset-client:start");
+ Assert.equal(engineObserver.topics[1], "weave:engine:reset-client:finish");
+
+ await cleanup(engine);
+});
+
+add_task(async function test_invalidChangedIDs() {
+ _("Test that invalid changed IDs on disk don't end up live.");
+ let engine = new SteamEngine("Steam", Service);
+ await engine.initialize();
+ let tracker = engine._tracker;
+
+ await tracker._beforeSave();
+ await IOUtils.writeUTF8(tracker._storage.path, "5", {
+ tmpPath: tracker._storage.path + ".tmp",
+ });
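+  // "5" is valid JSON but not an object, so the tracker should discard it
+  // and start from an empty change set when it reloads from disk.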
+
+ ok(!tracker._storage.dataReady);
+ const changes = await tracker.getChangedIDs();
+ changes.placeholder = true;
+ deepEqual(
+ changes,
+ { placeholder: true },
+ "Accessing changed IDs should load changes from disk as a side effect"
+ );
+ ok(tracker._storage.dataReady);
+
+ Assert.ok(changes.placeholder);
+ await cleanup(engine);
+});
+
+add_task(async function test_wipeClient() {
+ _("Engine.wipeClient calls resetClient, wipes store, clears changed IDs");
+ let engine = new SteamEngine("Steam", Service);
+ await engine.initialize();
+ Assert.ok(!engine.wasReset);
+ Assert.ok(!engine._store.wasWiped);
+ Assert.ok(await engine._tracker.addChangedID("a-changed-id"));
+ let changes = await engine._tracker.getChangedIDs();
+ Assert.ok("a-changed-id" in changes);
+
+ await engine.wipeClient();
+ Assert.ok(engine.wasReset);
+ Assert.ok(engine._store.wasWiped);
+ changes = await engine._tracker.getChangedIDs();
+ Assert.equal(JSON.stringify(changes), "{}");
+ Assert.equal(engineObserver.topics[0], "weave:engine:wipe-client:start");
+ Assert.equal(engineObserver.topics[1], "weave:engine:reset-client:start");
+ Assert.equal(engineObserver.topics[2], "weave:engine:reset-client:finish");
+ Assert.equal(engineObserver.topics[3], "weave:engine:wipe-client:finish");
+
+ await cleanup(engine);
+});
+
+add_task(async function test_enabled() {
+ _("Engine.enabled corresponds to preference");
+ let engine = new SteamEngine("Steam", Service);
+ await engine.initialize();
+ try {
+ Assert.ok(!engine.enabled);
+ Svc.PrefBranch.setBoolPref("engine.steam", true);
+ Assert.ok(engine.enabled);
+
+ engine.enabled = false;
+ Assert.ok(!Svc.PrefBranch.getBoolPref("engine.steam"));
+ } finally {
+ await cleanup(engine);
+ }
+});
+
+add_task(async function test_sync() {
+ let engine = new SteamEngine("Steam", Service);
+ await engine.initialize();
+ try {
+ _("Engine.sync doesn't call _sync if it's not enabled");
+ Assert.ok(!engine.enabled);
+ Assert.ok(!engine.wasSynced);
+ await engine.sync();
+
+ Assert.ok(!engine.wasSynced);
+
+ _("Engine.sync calls _sync if it's enabled");
+ engine.enabled = true;
+
+ await engine.sync();
+ Assert.ok(engine.wasSynced);
+ Assert.equal(engineObserver.topics[0], "weave:engine:sync:start");
+ Assert.equal(engineObserver.topics[1], "weave:engine:sync:finish");
+ } finally {
+ await cleanup(engine);
+ }
+});
+
+add_task(async function test_disabled_no_track() {
+ _("When an engine is disabled, its tracker is not tracking.");
+ let engine = new SteamEngine("Steam", Service);
+ await engine.initialize();
+ let tracker = engine._tracker;
+ Assert.equal(engine, tracker.engine);
+
+ Assert.ok(!engine.enabled);
+ Assert.ok(!tracker._isTracking);
+ let changes = await tracker.getChangedIDs();
+ do_check_empty(changes);
+
+ Assert.ok(!tracker.engineIsEnabled());
+ Assert.ok(!tracker._isTracking);
+ changes = await tracker.getChangedIDs();
+ do_check_empty(changes);
+
+ let promisePrefChangeHandled = Promise.withResolvers();
+ const origMethod = tracker.onEngineEnabledChanged;
+ tracker.onEngineEnabledChanged = async (...args) => {
+ await origMethod.apply(tracker, args);
+ promisePrefChangeHandled.resolve();
+ };
+
+ engine.enabled = true; // Also enables the tracker automatically.
+ await promisePrefChangeHandled.promise;
+ Assert.ok(tracker._isTracking);
+ changes = await tracker.getChangedIDs();
+ do_check_empty(changes);
+
+ await tracker.addChangedID("abcdefghijkl");
+ changes = await tracker.getChangedIDs();
+ Assert.ok(0 < changes.abcdefghijkl);
+ promisePrefChangeHandled = Promise.withResolvers();
+ Svc.PrefBranch.setBoolPref("engine." + engine.prefName, false);
+ await promisePrefChangeHandled.promise;
+ Assert.ok(!tracker._isTracking);
+ changes = await tracker.getChangedIDs();
+ do_check_empty(changes);
+
+ await cleanup(engine);
+});
diff --git a/services/sync/tests/unit/test_engine_abort.js b/services/sync/tests/unit/test_engine_abort.js
new file mode 100644
index 0000000000..f9bbf9d338
--- /dev/null
+++ b/services/sync/tests/unit/test_engine_abort.js
@@ -0,0 +1,81 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { WBORecord } = ChromeUtils.importESModule(
+ "resource://services-sync/record.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { RotaryEngine } = ChromeUtils.importESModule(
+ "resource://testing-common/services/sync/rotaryengine.sys.mjs"
+);
+
+add_task(async function test_processIncoming_abort() {
+ _(
+ "An abort exception, raised in applyIncoming, will abort _processIncoming."
+ );
+ let engine = new RotaryEngine(Service);
+
+ let collection = new ServerCollection();
+ let id = Utils.makeGUID();
+ let payload = encryptPayload({ id, denomination: "Record No. " + id });
+ collection.insert(id, payload);
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ _("Create some server data.");
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+ _("Fake applyIncoming to abort.");
+ engine._store.applyIncoming = async function (record) {
+ let ex = {
+ code: SyncEngine.prototype.eEngineAbortApplyIncoming,
+ cause: "Nooo",
+ };
+ _("Throwing: " + JSON.stringify(ex));
+ throw ex;
+ };
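+  // eEngineAbortApplyIncoming is a sentinel: the engine stops applying
+  // records and rethrows the cause ("Nooo") from _processIncoming.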
+
+ _("Trying _processIncoming. It will throw after aborting.");
+ let err;
+ try {
+ await engine._syncStartup();
+ await engine._processIncoming();
+ } catch (ex) {
+ err = ex;
+ }
+
+ Assert.equal(err, "Nooo");
+ err = undefined;
+
+ _("Trying engine.sync(). It will abort without error.");
+ try {
+ // This will quietly fail.
+ await engine.sync();
+ } catch (ex) {
+ err = ex;
+ }
+
+ Assert.equal(err, undefined);
+
+ await promiseStopServer(server);
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ Service.recordManager.clearCache();
+
+ await engine._tracker.clearChangedIDs();
+ await engine.finalize();
+});
diff --git a/services/sync/tests/unit/test_engine_changes_during_sync.js b/services/sync/tests/unit/test_engine_changes_during_sync.js
new file mode 100644
index 0000000000..891bea41ec
--- /dev/null
+++ b/services/sync/tests/unit/test_engine_changes_during_sync.js
@@ -0,0 +1,613 @@
+const { FormHistory } = ChromeUtils.importESModule(
+ "resource://gre/modules/FormHistory.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { Bookmark, BookmarkFolder, BookmarkQuery } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/bookmarks.sys.mjs"
+);
+const { HistoryRec } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/history.sys.mjs"
+);
+const { FormRec } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/forms.sys.mjs"
+);
+const { LoginRec } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/passwords.sys.mjs"
+);
+const { PrefRec } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/prefs.sys.mjs"
+);
+
+const LoginInfo = Components.Constructor(
+ "@mozilla.org/login-manager/loginInfo;1",
+ Ci.nsILoginInfo,
+ "init"
+);
+
+/**
+ * We don't test the clients or tabs engines because neither has
+ * timestamp-based conflict resolution. The clients engine syncs twice per
+ * global sync, and has custom conflict resolution logic for commands that
+ * doesn't rely on timestamps. Tabs has no conflict resolution at all, since
+ * it's read-only.
+ */
+
+async function assertChildGuids(folderGuid, expectedChildGuids, message) {
+ let tree = await PlacesUtils.promiseBookmarksTree(folderGuid);
+ let childGuids = tree.children.map(child => child.guid);
+ deepEqual(childGuids, expectedChildGuids, message);
+}
+
+async function cleanup(engine, server) {
+ await engine._tracker.stop();
+ await engine._store.wipe();
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ Service.recordManager.clearCache();
+ await promiseStopServer(server);
+}
+
+add_task(async function test_history_change_during_sync() {
+ _("Ensure that we don't bump the score when applying history records.");
+
+ enableValidationPrefs();
+
+ let engine = Service.engineManager.get("history");
+ let server = await serverForEnginesWithKeys({ foo: "password" }, [engine]);
+ await SyncTestingInfrastructure(server);
+ let collection = server.user("foo").collection("history");
+
+ // Override `uploadOutgoing` to insert a record while we're applying
+ // changes. The tracker should ignore this change.
+ let uploadOutgoing = engine._uploadOutgoing;
+ engine._uploadOutgoing = async function () {
+ engine._uploadOutgoing = uploadOutgoing;
+ try {
+ await uploadOutgoing.call(this);
+ } finally {
+ _("Inserting local history visit");
+ await addVisit("during_sync");
+ await engine._tracker.asyncObserver.promiseObserversComplete();
+ }
+ };
+
+ engine._tracker.start();
+
+ try {
+ let remoteRec = new HistoryRec("history", "UrOOuzE5QM-e");
+ remoteRec.histUri = "http://getfirefox.com/";
+ remoteRec.title = "Get Firefox!";
+ remoteRec.visits = [
+ {
+ date: PlacesUtils.toPRTime(Date.now()),
+ type: PlacesUtils.history.TRANSITION_TYPED,
+ },
+ ];
+ collection.insert(remoteRec.id, encryptPayload(remoteRec.cleartext));
+
+ await sync_engine_and_validate_telem(engine, true);
+ strictEqual(
+ Service.scheduler.globalScore,
+ 0,
+ "Should not bump global score for visits added during sync"
+ );
+
+ equal(
+ collection.count(),
+ 1,
+ "New local visit should not exist on server after first sync"
+ );
+
+ await sync_engine_and_validate_telem(engine, true);
+ strictEqual(
+ Service.scheduler.globalScore,
+ 0,
+ "Should not bump global score during second history sync"
+ );
+
+ equal(
+ collection.count(),
+ 2,
+ "New local visit should exist on server after second sync"
+ );
+ } finally {
+ engine._uploadOutgoing = uploadOutgoing;
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_passwords_change_during_sync() {
+ _("Ensure that we don't bump the score when applying passwords.");
+
+ enableValidationPrefs();
+
+ let engine = Service.engineManager.get("passwords");
+ let server = await serverForEnginesWithKeys({ foo: "password" }, [engine]);
+ await SyncTestingInfrastructure(server);
+ let collection = server.user("foo").collection("passwords");
+
+ let uploadOutgoing = engine._uploadOutgoing;
+ engine._uploadOutgoing = async function () {
+ engine._uploadOutgoing = uploadOutgoing;
+ try {
+ await uploadOutgoing.call(this);
+ } finally {
+ _("Inserting local password");
+ let login = new LoginInfo(
+ "https://example.com",
+ "",
+ null,
+ "username",
+ "password",
+ "",
+ ""
+ );
+ await Services.logins.addLoginAsync(login);
+ await engine._tracker.asyncObserver.promiseObserversComplete();
+ }
+ };
+
+ engine._tracker.start();
+
+ try {
+ let remoteRec = new LoginRec(
+ "passwords",
+ "{765e3d6e-071d-d640-a83d-81a7eb62d3ed}"
+ );
+ remoteRec.formSubmitURL = "";
+ remoteRec.httpRealm = "";
+ remoteRec.hostname = "https://mozilla.org";
+ remoteRec.username = "username";
+ remoteRec.password = "sekrit";
+ remoteRec.timeCreated = Date.now();
+ remoteRec.timePasswordChanged = Date.now();
+ collection.insert(remoteRec.id, encryptPayload(remoteRec.cleartext));
+
+ await sync_engine_and_validate_telem(engine, true);
+ strictEqual(
+ Service.scheduler.globalScore,
+ 0,
+ "Should not bump global score for passwords added during first sync"
+ );
+
+ equal(
+ collection.count(),
+ 1,
+ "New local password should not exist on server after first sync"
+ );
+
+ await sync_engine_and_validate_telem(engine, true);
+ strictEqual(
+ Service.scheduler.globalScore,
+ 0,
+ "Should not bump global score during second passwords sync"
+ );
+
+ equal(
+ collection.count(),
+ 2,
+ "New local password should exist on server after second sync"
+ );
+ } finally {
+ engine._uploadOutgoing = uploadOutgoing;
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_prefs_change_during_sync() {
+ _("Ensure that we don't bump the score when applying prefs.");
+
+ const TEST_PREF = "test.duringSync";
+ // create a "control pref" for the pref we sync.
+ Services.prefs.setBoolPref("services.sync.prefs.sync.test.duringSync", true);
+
+ enableValidationPrefs();
+
+ let engine = Service.engineManager.get("prefs");
+ let server = await serverForEnginesWithKeys({ foo: "password" }, [engine]);
+ await SyncTestingInfrastructure(server);
+ let collection = server.user("foo").collection("prefs");
+
+ let uploadOutgoing = engine._uploadOutgoing;
+ engine._uploadOutgoing = async function () {
+ engine._uploadOutgoing = uploadOutgoing;
+ try {
+ await uploadOutgoing.call(this);
+ } finally {
+ _("Updating local pref value");
+ // Change the value of a synced pref.
+ Services.prefs.setStringPref(TEST_PREF, "hello");
+ await engine._tracker.asyncObserver.promiseObserversComplete();
+ }
+ };
+
+ engine._tracker.start();
+
+ try {
+ // All synced prefs are stored in a single record, so we'll only ever
+ // have one record on the server. This test just checks that we don't
+ // track or upload prefs changed during the sync.
+ let guid = CommonUtils.encodeBase64URL(Services.appinfo.ID);
+ let remoteRec = new PrefRec("prefs", guid);
+ remoteRec.value = {
+ [TEST_PREF]: "world",
+ };
+ collection.insert(remoteRec.id, encryptPayload(remoteRec.cleartext));
+
+ await sync_engine_and_validate_telem(engine, true);
+ strictEqual(
+ Service.scheduler.globalScore,
+ 0,
+ "Should not bump global score for prefs added during first sync"
+ );
+ let payloads = collection.payloads();
+ equal(
+ payloads.length,
+ 1,
+ "Should not upload multiple prefs records after first sync"
+ );
+ equal(
+ payloads[0].value[TEST_PREF],
+ "world",
+ "Should not upload pref value changed during first sync"
+ );
+
+ await sync_engine_and_validate_telem(engine, true);
+ strictEqual(
+ Service.scheduler.globalScore,
+ 0,
+ "Should not bump global score during second prefs sync"
+ );
+ payloads = collection.payloads();
+ equal(
+ payloads.length,
+ 1,
+ "Should not upload multiple prefs records after second sync"
+ );
+ equal(
+ payloads[0].value[TEST_PREF],
+ "hello",
+ "Should upload changed pref value during second sync"
+ );
+ } finally {
+ engine._uploadOutgoing = uploadOutgoing;
+ await cleanup(engine, server);
+ Services.prefs.clearUserPref(TEST_PREF);
+ }
+});
+
+add_task(async function test_forms_change_during_sync() {
+ _("Ensure that we don't bump the score when applying form records.");
+
+ enableValidationPrefs();
+
+ let engine = Service.engineManager.get("forms");
+ let server = await serverForEnginesWithKeys({ foo: "password" }, [engine]);
+ await SyncTestingInfrastructure(server);
+ let collection = server.user("foo").collection("forms");
+
+ let uploadOutgoing = engine._uploadOutgoing;
+ engine._uploadOutgoing = async function () {
+ engine._uploadOutgoing = uploadOutgoing;
+ try {
+ await uploadOutgoing.call(this);
+ } finally {
+ _("Inserting local form history entry");
+ await FormHistory.update([
+ {
+ op: "add",
+ fieldname: "favoriteDrink",
+ value: "cocoa",
+ },
+ ]);
+ await engine._tracker.asyncObserver.promiseObserversComplete();
+ }
+ };
+
+ engine._tracker.start();
+
+ try {
+ // Add an existing remote form history entry. We shouldn't bump the score when
+ // we apply this record.
+ let remoteRec = new FormRec("forms", "Tl9dHgmJSR6FkyxS");
+ remoteRec.name = "name";
+ remoteRec.value = "alice";
+ collection.insert(remoteRec.id, encryptPayload(remoteRec.cleartext));
+
+ await sync_engine_and_validate_telem(engine, true);
+ strictEqual(
+ Service.scheduler.globalScore,
+ 0,
+ "Should not bump global score for forms added during first sync"
+ );
+
+ equal(
+ collection.count(),
+ 1,
+ "New local form should not exist on server after first sync"
+ );
+
+ await sync_engine_and_validate_telem(engine, true);
+ strictEqual(
+ Service.scheduler.globalScore,
+ 0,
+ "Should not bump global score during second forms sync"
+ );
+
+ equal(
+ collection.count(),
+ 2,
+ "New local form should exist on server after second sync"
+ );
+ } finally {
+ engine._uploadOutgoing = uploadOutgoing;
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_bookmark_change_during_sync() {
+ _("Ensure that we track bookmark changes made during a sync.");
+
+ enableValidationPrefs();
+ let schedulerProto = Object.getPrototypeOf(Service.scheduler);
+ let syncThresholdDescriptor = Object.getOwnPropertyDescriptor(
+ schedulerProto,
+ "syncThreshold"
+ );
+ Object.defineProperty(Service.scheduler, "syncThreshold", {
+ // Trigger resync if any changes exist, rather than deciding based on the
+ // normal sync threshold.
+ get: () => 0,
+ });
+
+ let engine = Service.engineManager.get("bookmarks");
+ let server = await serverForEnginesWithKeys({ foo: "password" }, [engine]);
+ await SyncTestingInfrastructure(server);
+
+ // Already-tracked bookmarks that shouldn't be uploaded during the first sync.
+ let bzBmk = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.menuGuid,
+ url: "https://bugzilla.mozilla.org/",
+ title: "Bugzilla",
+ });
+ _(`Bugzilla GUID: ${bzBmk.guid}`);
+
+ await PlacesTestUtils.setBookmarkSyncFields({
+ guid: bzBmk.guid,
+ syncChangeCounter: 0,
+ syncStatus: PlacesUtils.bookmarks.SYNC_STATUS.NORMAL,
+ });
+
+ let collection = server.user("foo").collection("bookmarks");
+
+ let bmk3; // New child of Folder 1, created locally during sync.
+
+ let uploadOutgoing = engine._uploadOutgoing;
+ engine._uploadOutgoing = async function () {
+ engine._uploadOutgoing = uploadOutgoing;
+ try {
+ await uploadOutgoing.call(this);
+ } finally {
+ _("Inserting bookmark into local store");
+ bmk3 = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder1.guid,
+ url: "https://mozilla.org/",
+ title: "Mozilla",
+ });
+ await engine._tracker.asyncObserver.promiseObserversComplete();
+ }
+ };
+
+ // New bookmarks that should be uploaded during the first sync.
+ let folder1 = await PlacesUtils.bookmarks.insert({
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ title: "Folder 1",
+ });
+ _(`Folder GUID: ${folder1.guid}`);
+
+ let tbBmk = await PlacesUtils.bookmarks.insert({
+ parentGuid: folder1.guid,
+ url: "http://getthunderbird.com/",
+ title: "Get Thunderbird!",
+ });
+ _(`Thunderbird GUID: ${tbBmk.guid}`);
+
+ engine._tracker.start();
+
+ try {
+ let bmk2_guid = "get-firefox1"; // New child of Folder 1, created remotely.
+ let folder2_guid = "folder2-1111"; // New folder, created remotely.
+ let tagQuery_guid = "tag-query111"; // New tag query child of Folder 2, created remotely.
+ let bmk4_guid = "example-org1"; // New tagged child of Folder 2, created remotely.
+ {
+ // An existing record changed on the server that should not trigger
+ // another sync when applied.
+ let remoteBzBmk = new Bookmark("bookmarks", bzBmk.guid);
+ remoteBzBmk.bmkUri = "https://bugzilla.mozilla.org/";
+ remoteBzBmk.description = "New description";
+ remoteBzBmk.title = "Bugzilla";
+ remoteBzBmk.tags = ["new", "tags"];
+ remoteBzBmk.parentName = "Bookmarks Menu";
+ remoteBzBmk.parentid = "menu";
+ collection.insert(bzBmk.guid, encryptPayload(remoteBzBmk.cleartext));
+
+ let remoteFolder = new BookmarkFolder("bookmarks", folder2_guid);
+ remoteFolder.title = "Folder 2";
+ remoteFolder.children = [bmk4_guid, tagQuery_guid];
+ remoteFolder.parentName = "Bookmarks Menu";
+ remoteFolder.parentid = "menu";
+ collection.insert(folder2_guid, encryptPayload(remoteFolder.cleartext));
+
+ let remoteFxBmk = new Bookmark("bookmarks", bmk2_guid);
+ remoteFxBmk.bmkUri = "http://getfirefox.com/";
+ remoteFxBmk.description = "Firefox is awesome.";
+ remoteFxBmk.title = "Get Firefox!";
+ remoteFxBmk.tags = ["firefox", "awesome", "browser"];
+ remoteFxBmk.keyword = "awesome";
+ remoteFxBmk.parentName = "Folder 1";
+ remoteFxBmk.parentid = folder1.guid;
+ collection.insert(bmk2_guid, encryptPayload(remoteFxBmk.cleartext));
+
+ // A tag query referencing a nonexistent tag folder, which we should
+ // create locally when applying the record.
+ let remoteTagQuery = new BookmarkQuery("bookmarks", tagQuery_guid);
+ remoteTagQuery.bmkUri = "place:type=7&folder=999";
+ remoteTagQuery.title = "Taggy tags";
+ remoteTagQuery.folderName = "taggy";
+ remoteTagQuery.parentName = "Folder 2";
+ remoteTagQuery.parentid = folder2_guid;
+ collection.insert(
+ tagQuery_guid,
+ encryptPayload(remoteTagQuery.cleartext)
+ );
+
+ // A bookmark that should appear in the results for the tag query.
+ let remoteTaggedBmk = new Bookmark("bookmarks", bmk4_guid);
+ remoteTaggedBmk.bmkUri = "https://example.org/";
+ remoteTaggedBmk.title = "Tagged bookmark";
+ remoteTaggedBmk.tags = ["taggy"];
+ remoteTaggedBmk.parentName = "Folder 2";
+ remoteTaggedBmk.parentid = folder2_guid;
+ collection.insert(bmk4_guid, encryptPayload(remoteTaggedBmk.cleartext));
+
+ collection.insert(
+ "toolbar",
+ encryptPayload({
+ id: "toolbar",
+ type: "folder",
+ title: "toolbar",
+ children: [folder1.guid],
+ parentName: "places",
+ parentid: "places",
+ })
+ );
+
+ collection.insert(
+ "menu",
+ encryptPayload({
+ id: "menu",
+ type: "folder",
+ title: "menu",
+ children: [bzBmk.guid, folder2_guid],
+ parentName: "places",
+ parentid: "places",
+ })
+ );
+
+ collection.insert(
+ folder1.guid,
+ encryptPayload({
+ id: folder1.guid,
+ type: "folder",
+ title: "Folder 1",
+ children: [bmk2_guid],
+ parentName: "toolbar",
+ parentid: "toolbar",
+ })
+ );
+ }
+
+ await assertChildGuids(
+ folder1.guid,
+ [tbBmk.guid],
+ "Folder should have 1 child before first sync"
+ );
+
+ let pingsPromise = wait_for_pings(2);
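+ // Expect two pings: one for the first sync and one for the follow-up sync
+ // triggered by the bookmark inserted during upload.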
+
+ let changes = await PlacesSyncUtils.bookmarks.pullChanges();
+ deepEqual(
+ Object.keys(changes).sort(),
+ [folder1.guid, tbBmk.guid, "menu", "mobile", "toolbar", "unfiled"].sort(),
+ "Should track bookmark and folder created before first sync"
+ );
+
+ // Unlike the tests above, we can't use `sync_engine_and_validate_telem`
+ // because the bookmarks engine will automatically schedule a follow-up
+ // sync for us.
+ _("Perform first sync and immediate follow-up sync");
+ Service.sync({ engines: ["bookmarks"] });
+
+ let pings = await pingsPromise;
+ equal(pings.length, 2, "Should submit two pings");
+ ok(
+ pings.every(p => {
+ assert_success_ping(p);
+ return p.syncs.length == 1;
+ }),
+ "Should submit 1 sync per ping"
+ );
+
+ strictEqual(
+ Service.scheduler.globalScore,
+ 0,
+ "Should reset global score after follow-up sync"
+ );
+ ok(bmk3, "Should insert bookmark during first sync to simulate change");
+ ok(
+ collection.wbo(bmk3.guid),
+ "Changed bookmark should be uploaded after follow-up sync"
+ );
+
+ let bmk2 = await PlacesUtils.bookmarks.fetch({
+ guid: bmk2_guid,
+ });
+ ok(bmk2, "Remote bookmark should be applied during first sync");
+ {
+ // We only check child GUIDs, and not their order, because the exact
+ // order is an implementation detail.
+ let folder1Children = await PlacesSyncUtils.bookmarks.fetchChildRecordIds(
+ folder1.guid
+ );
+ deepEqual(
+ folder1Children.sort(),
+ [bmk2_guid, tbBmk.guid, bmk3.guid].sort(),
+ "Folder 1 should have 3 children after first sync"
+ );
+ }
+ await assertChildGuids(
+ folder2_guid,
+ [bmk4_guid, tagQuery_guid],
+ "Folder 2 should have 2 children after first sync"
+ );
+ let taggedURIs = [];
+ await PlacesUtils.bookmarks.fetch({ tags: ["taggy"] }, b =>
+ taggedURIs.push(b.url)
+ );
+ equal(taggedURIs.length, 1, "Should have 1 tagged URI");
+ equal(
+ taggedURIs[0].href,
+ "https://example.org/",
+ "Synced tagged bookmark should appear in tagged URI list"
+ );
+
+ changes = await PlacesSyncUtils.bookmarks.pullChanges();
+ deepEqual(
+ changes,
+ {},
+ "Should have already uploaded changes in follow-up sync"
+ );
+
+ // Both pings should include validation data for the bookmarks engine.
+ let engineData = pings.map(p => {
+ return p.syncs[0].engines.find(e => e.name == "bookmarks-buffered");
+ });
+ ok(engineData[0].validation, "Engine should validate after first sync");
+ ok(engineData[1].validation, "Engine should validate after second sync");
+ } finally {
+ Object.defineProperty(
+ schedulerProto,
+ "syncThreshold",
+ syncThresholdDescriptor
+ );
+ engine._uploadOutgoing = uploadOutgoing;
+ await cleanup(engine, server);
+ }
+});
diff --git a/services/sync/tests/unit/test_enginemanager.js b/services/sync/tests/unit/test_enginemanager.js
new file mode 100644
index 0000000000..3e366be54f
--- /dev/null
+++ b/services/sync/tests/unit/test_enginemanager.js
@@ -0,0 +1,232 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
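+// Minimal engine stubs: registration only requires a `name` and an async
+// finalize(); these deliberately don't extend SyncEngine.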
+function PetrolEngine() {}
+PetrolEngine.prototype.name = "petrol";
+PetrolEngine.prototype.finalize = async function () {};
+
+function DieselEngine() {}
+DieselEngine.prototype.name = "diesel";
+DieselEngine.prototype.finalize = async function () {};
+
+function DummyEngine() {}
+DummyEngine.prototype.name = "dummy";
+DummyEngine.prototype.finalize = async function () {};
+
+class ActualEngine extends SyncEngine {
+ constructor(service) {
+ super("Actual", service);
+ }
+}
+
+add_task(async function test_basics() {
+ _("We start out with a clean slate");
+
+ let manager = new EngineManager(Service);
+
+ let engines = await manager.getAll();
+ Assert.equal(engines.length, 0);
+ Assert.equal(await manager.get("dummy"), undefined);
+
+ _("Register an engine");
+ await manager.register(DummyEngine);
+ let dummy = await manager.get("dummy");
+ Assert.ok(dummy instanceof DummyEngine);
+
+ engines = await manager.getAll();
+ Assert.equal(engines.length, 1);
+ Assert.equal(engines[0], dummy);
+
+ _("Register an already registered engine is ignored");
+ await manager.register(DummyEngine);
+ Assert.equal(await manager.get("dummy"), dummy);
+
+ _("Register multiple engines in one go");
+ await manager.register([PetrolEngine, DieselEngine]);
+ let petrol = await manager.get("petrol");
+ let diesel = await manager.get("diesel");
+ Assert.ok(petrol instanceof PetrolEngine);
+ Assert.ok(diesel instanceof DieselEngine);
+
+ engines = await manager.getAll();
+ Assert.equal(engines.length, 3);
+ Assert.notEqual(engines.indexOf(petrol), -1);
+ Assert.notEqual(engines.indexOf(diesel), -1);
+
+ _("Retrieve multiple engines in one go");
+ engines = await manager.get(["dummy", "diesel"]);
+ Assert.equal(engines.length, 2);
+ Assert.notEqual(engines.indexOf(dummy), -1);
+ Assert.notEqual(engines.indexOf(diesel), -1);
+
+ _("getEnabled() only returns enabled engines");
+ engines = await manager.getEnabled();
+ Assert.equal(engines.length, 0);
+
+ petrol.enabled = true;
+ engines = await manager.getEnabled();
+ Assert.equal(engines.length, 1);
+ Assert.equal(engines[0], petrol);
+
+ dummy.enabled = true;
+ diesel.enabled = true;
+ engines = await manager.getEnabled();
+ Assert.equal(engines.length, 3);
+
+ _("getEnabled() returns enabled engines in sorted order");
+ petrol.syncPriority = 1;
+ dummy.syncPriority = 2;
+ diesel.syncPriority = 3;
+
+ engines = await manager.getEnabled();
+
+ Assert.deepEqual(engines, [petrol, dummy, diesel]);
+
+ _("Changing the priorities should change the order in getEnabled()");
+
+ dummy.syncPriority = 4;
+
+ engines = await manager.getEnabled();
+
+ Assert.deepEqual(engines, [petrol, diesel, dummy]);
+
+ _("Unregister an engine by name");
+ await manager.unregister("dummy");
+ Assert.equal(await manager.get("dummy"), undefined);
+ engines = await manager.getAll();
+ Assert.equal(engines.length, 2);
+ Assert.equal(engines.indexOf(dummy), -1);
+
+ _("Unregister an engine by value");
+ // manager.unregister() checks for instanceof Engine, so let's make one:
+ await manager.register(ActualEngine);
+ let actual = await manager.get("actual");
+ Assert.ok(actual instanceof ActualEngine);
+ Assert.ok(actual instanceof SyncEngine);
+
+ await manager.unregister(actual);
+ Assert.equal(await manager.get("actual"), undefined);
+});
+
+class AutoEngine {
+ constructor(type) {
+ this.name = "automobile";
+ this.type = type;
+ this.initializeCalled = false;
+ this.finalizeCalled = false;
+ this.isActive = false;
+ }
+
+ async initialize() {
+ Assert.ok(!this.initializeCalled);
+ Assert.equal(AutoEngine.current, undefined);
+ this.initializeCalled = true;
+ this.isActive = true;
+ AutoEngine.current = this;
+ }
+
+ async finalize() {
+ Assert.equal(AutoEngine.current, this);
+ Assert.ok(!this.finalizeCalled);
+ Assert.ok(this.isActive);
+ this.finalizeCalled = true;
+ this.isActive = false;
+ AutoEngine.current = undefined;
+ }
+}
+
+class GasolineEngine extends AutoEngine {
+ constructor() {
+ super("gasoline");
+ }
+}
+
+class ElectricEngine extends AutoEngine {
+ constructor() {
+ super("electric");
+ }
+}
+
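+// registerAlternatives() registers a pair of engine implementations switched
+// by a boolean pref: switchAlternatives() finalizes the active engine and
+// initializes the other, but only once the pref value has actually changed.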
+add_task(async function test_alternates() {
+ let manager = new EngineManager(Service);
+ let engines = await manager.getAll();
+ Assert.equal(engines.length, 0);
+
+ const prefName = "services.sync.engines.automobile.electric";
+ Services.prefs.clearUserPref(prefName);
+
+ await manager.registerAlternatives(
+ "automobile",
+ prefName,
+ ElectricEngine,
+ GasolineEngine
+ );
+
+ let gasEngine = manager.get("automobile");
+ Assert.equal(gasEngine.type, "gasoline");
+
+ Assert.ok(gasEngine.isActive);
+ Assert.ok(gasEngine.initializeCalled);
+ Assert.ok(!gasEngine.finalizeCalled);
+ Assert.equal(AutoEngine.current, gasEngine);
+
+ _("Check that setting the controlling pref to false makes no difference");
+ Services.prefs.setBoolPref(prefName, false);
+ Assert.equal(manager.get("automobile"), gasEngine);
+ Assert.ok(gasEngine.isActive);
+ Assert.ok(gasEngine.initializeCalled);
+ Assert.ok(!gasEngine.finalizeCalled);
+
+ _("Even after the call to switchAlternatives");
+ await manager.switchAlternatives();
+ Assert.equal(manager.get("automobile"), gasEngine);
+ Assert.ok(gasEngine.isActive);
+ Assert.ok(gasEngine.initializeCalled);
+ Assert.ok(!gasEngine.finalizeCalled);
+
+ _("Set the pref to true, we still shouldn't switch yet");
+ Services.prefs.setBoolPref(prefName, true);
+ Assert.equal(manager.get("automobile"), gasEngine);
+ Assert.ok(gasEngine.isActive);
+ Assert.ok(gasEngine.initializeCalled);
+ Assert.ok(!gasEngine.finalizeCalled);
+
+ _("Now we expect to switch from gas to electric");
+ await manager.switchAlternatives();
+ let elecEngine = manager.get("automobile");
+ Assert.equal(elecEngine.type, "electric");
+ Assert.ok(elecEngine.isActive);
+ Assert.ok(elecEngine.initializeCalled);
+ Assert.ok(!elecEngine.finalizeCalled);
+ Assert.equal(AutoEngine.current, elecEngine);
+
+ Assert.ok(!gasEngine.isActive);
+ Assert.ok(gasEngine.finalizeCalled);
+
+ _("Switch back, and ensure we get a new instance that got initialized again");
+ Services.prefs.setBoolPref(prefName, false);
+ await manager.switchAlternatives();
+
+ // First make sure we deactivated the electric engine as we should
+ Assert.ok(!elecEngine.isActive);
+ Assert.ok(elecEngine.initializeCalled);
+ Assert.ok(elecEngine.finalizeCalled);
+
+ let newGasEngine = manager.get("automobile");
+ Assert.notEqual(newGasEngine, gasEngine);
+ Assert.equal(newGasEngine.type, "gasoline");
+
+ Assert.ok(newGasEngine.isActive);
+ Assert.ok(newGasEngine.initializeCalled);
+ Assert.ok(!newGasEngine.finalizeCalled);
+
+ _("Make sure unregister removes the alt info too");
+ await manager.unregister("automobile");
+ Assert.equal(manager.get("automobile"), null);
+ Assert.ok(newGasEngine.finalizeCalled);
+ Assert.deepEqual(Object.keys(manager._altEngineInfo), []);
+});
diff --git a/services/sync/tests/unit/test_errorhandler_1.js b/services/sync/tests/unit/test_errorhandler_1.js
new file mode 100644
index 0000000000..2d52b93a02
--- /dev/null
+++ b/services/sync/tests/unit/test_errorhandler_1.js
@@ -0,0 +1,341 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { Status } = ChromeUtils.importESModule(
+ "resource://services-sync/status.sys.mjs"
+);
+
+const fakeServer = new SyncServer();
+fakeServer.start();
+const fakeServerUrl = "http://localhost:" + fakeServer.port;
+
+registerCleanupFunction(function () {
+ return promiseStopServer(fakeServer).finally(() => {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ });
+});
+
+let engine;
+add_task(async function setup() {
+ await Service.engineManager.clear();
+ await Service.engineManager.register(EHTestsCommon.CatapultEngine);
+ engine = Service.engineManager.get("catapult");
+});
+
+async function clean() {
+ let promiseLogReset = promiseOneObserver("weave:service:reset-file-log");
+ await Service.startOver();
+ await promiseLogReset;
+ Status.resetSync();
+ Status.resetBackoff();
+ // Move log levels back to trace (startOver will have reversed this).
+ syncTestLogging();
+}
+
+add_task(async function test_401_logout() {
+ enableValidationPrefs();
+
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+
+ // By calling sync, we ensure we're logged in.
+ await sync_and_validate_telem();
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+ Assert.ok(Service.isLoggedIn);
+
+ let promiseErrors = new Promise(res => {
+ Svc.Obs.add("weave:service:sync:error", onSyncError);
+ function onSyncError() {
+ _("Got weave:service:sync:error in first sync.");
+ Svc.Obs.remove("weave:service:sync:error", onSyncError);
+
+ // Wait for the automatic next sync.
+ Svc.Obs.add("weave:service:login:error", onLoginError);
+ function onLoginError() {
+ _("Got weave:service:login:error in second sync.");
+ Svc.Obs.remove("weave:service:login:error", onLoginError);
+ res();
+ }
+ }
+ });
+
+ // Make sync fail due to login rejected.
+ await configureIdentity({ username: "janedoe" }, server);
+ Service._updateCachedURLs();
+
+ _("Starting first sync.");
+ await sync_and_validate_telem(ping => {
+ deepEqual(ping.failureReason, { name: "httperror", code: 401 });
+ });
+ _("First sync done.");
+
+ await promiseErrors;
+ Assert.equal(Status.login, LOGIN_FAILED_NETWORK_ERROR);
+ Assert.ok(!Service.isLoggedIn);
+
+ // Clean up.
+ await Service.startOver();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_credentials_changed_logout() {
+ enableValidationPrefs();
+
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+
+ // By calling sync, we ensure we're logged in.
+ await sync_and_validate_telem();
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+ Assert.ok(Service.isLoggedIn);
+
+ await EHTestsCommon.generateCredentialsChangedFailure();
+
+ await sync_and_validate_telem(ping => {
+ equal(ping.status.sync, CREDENTIALS_CHANGED);
+ deepEqual(ping.failureReason, {
+ name: "unexpectederror",
+ error: "Error: Aborting sync, remote setup failed",
+ });
+ });
+
+ Assert.equal(Status.sync, CREDENTIALS_CHANGED);
+ Assert.ok(!Service.isLoggedIn);
+
+ // Clean up.
+ await Service.startOver();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_login_non_network_error() {
+ enableValidationPrefs();
+
+ // Test that non-network errors are reported when calling sync.
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+ Service.identity._syncKeyBundle = null;
+
+ await Service.sync();
+ Assert.equal(Status.login, LOGIN_FAILED_NO_PASSPHRASE);
+
+ await clean();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_sync_non_network_error() {
+ enableValidationPrefs();
+
+ // Test that non-network errors are reported when calling sync.
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+
+ // By calling sync, we ensure we're logged in.
+ await Service.sync();
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+ Assert.ok(Service.isLoggedIn);
+
+ await EHTestsCommon.generateCredentialsChangedFailure();
+
+ await sync_and_validate_telem(ping => {
+ equal(ping.status.sync, CREDENTIALS_CHANGED);
+ deepEqual(ping.failureReason, {
+ name: "unexpectederror",
+ error: "Error: Aborting sync, remote setup failed",
+ });
+ });
+
+ Assert.equal(Status.sync, CREDENTIALS_CHANGED);
+ // If we clean up within this tick, telemetry won't get the right error.
+ await Async.promiseYield();
+ await clean();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_login_sync_network_error() {
+ enableValidationPrefs();
+
+ // Test network errors are reported when calling sync.
+ await configureIdentity({ username: "broken.wipe" });
+ Service.clusterURL = fakeServerUrl;
+
+ await Service.sync();
+ Assert.equal(Status.login, LOGIN_FAILED_NETWORK_ERROR);
+
+ await clean();
+});
+
+add_task(async function test_sync_network_error() {
+ enableValidationPrefs();
+
+ // Test network errors are reported when calling sync.
+ Services.io.offline = true;
+
+ await Service.sync();
+ Assert.equal(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
+
+ Services.io.offline = false;
+ await clean();
+});
+
+add_task(async function test_login_non_network_error() {
+ enableValidationPrefs();
+
+ // Test non-network errors are reported
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+ Service.identity._syncKeyBundle = null;
+
+ await Service.sync();
+ Assert.equal(Status.login, LOGIN_FAILED_NO_PASSPHRASE);
+
+ await clean();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_sync_non_network_error() {
+ enableValidationPrefs();
+
+ // Test non-network errors are reported
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+
+ // By calling sync, we ensure we're logged in.
+ await Service.sync();
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+ Assert.ok(Service.isLoggedIn);
+
+ await EHTestsCommon.generateCredentialsChangedFailure();
+
+ await Service.sync();
+ Assert.equal(Status.sync, CREDENTIALS_CHANGED);
+
+ await clean();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_login_network_error() {
+ enableValidationPrefs();
+
+ await configureIdentity({ username: "johndoe" });
+ Service.clusterURL = fakeServerUrl;
+
+ // Test network errors are not reported.
+
+ await Service.sync();
+ Assert.equal(Status.login, LOGIN_FAILED_NETWORK_ERROR);
+
+ Services.io.offline = false;
+ await clean();
+});
+
+add_task(async function test_sync_network_error() {
+ enableValidationPrefs();
+
+ // Test network errors are not reported.
+ Services.io.offline = true;
+
+ await Service.sync();
+ Assert.equal(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
+
+ Services.io.offline = false;
+ await clean();
+});
+
+add_task(async function test_sync_server_maintenance_error() {
+ enableValidationPrefs();
+
+ // Test server maintenance errors are not reported.
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+
+ const BACKOFF = 42;
+ engine.enabled = true;
+ engine.exception = { status: 503, headers: { "retry-after": BACKOFF } };
+
+ Assert.equal(Status.service, STATUS_OK);
+
+ await sync_and_validate_telem(ping => {
+ equal(ping.status.sync, SERVER_MAINTENANCE);
+ deepEqual(ping.engines.find(e => e.failureReason).failureReason, {
+ name: "httperror",
+ code: 503,
+ });
+ });
+
+ Assert.equal(Status.service, SYNC_FAILED_PARTIAL);
+ Assert.equal(Status.sync, SERVER_MAINTENANCE);
+
+ await clean();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_info_collections_login_server_maintenance_error() {
+ enableValidationPrefs();
+
+ // Test info/collections server maintenance errors are not reported.
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+
+ await configureIdentity({ username: "broken.info" }, server);
+
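+ // Capture the backoff interval broadcast with the notification, so we can
+ // assert the server's Retry-After value below.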
+ let backoffInterval;
+ Svc.Obs.add(
+ "weave:service:backoff:interval",
+ function observe(subject, data) {
+ Svc.Obs.remove("weave:service:backoff:interval", observe);
+ backoffInterval = subject;
+ }
+ );
+
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(Status.service, STATUS_OK);
+
+ await Service.sync();
+
+ Assert.ok(Status.enforceBackoff);
+ Assert.equal(backoffInterval, 42);
+ Assert.equal(Status.service, LOGIN_FAILED);
+ Assert.equal(Status.login, SERVER_MAINTENANCE);
+
+ await clean();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_meta_global_login_server_maintenance_error() {
+ enableValidationPrefs();
+
+ // Test meta/global server maintenance errors are not reported.
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+
+ await configureIdentity({ username: "broken.meta" }, server);
+
+ let backoffInterval;
+ Svc.Obs.add(
+ "weave:service:backoff:interval",
+ function observe(subject, data) {
+ Svc.Obs.remove("weave:service:backoff:interval", observe);
+ backoffInterval = subject;
+ }
+ );
+
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(Status.service, STATUS_OK);
+
+ await Service.sync();
+
+ Assert.ok(Status.enforceBackoff);
+ Assert.equal(backoffInterval, 42);
+ Assert.equal(Status.service, LOGIN_FAILED);
+ Assert.equal(Status.login, SERVER_MAINTENANCE);
+
+ await clean();
+ await promiseStopServer(server);
+});
diff --git a/services/sync/tests/unit/test_errorhandler_2.js b/services/sync/tests/unit/test_errorhandler_2.js
new file mode 100644
index 0000000000..5cab4d832d
--- /dev/null
+++ b/services/sync/tests/unit/test_errorhandler_2.js
@@ -0,0 +1,550 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { Status } = ChromeUtils.importESModule(
+ "resource://services-sync/status.sys.mjs"
+);
+const { FileUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/FileUtils.sys.mjs"
+);
+
+const fakeServer = new SyncServer();
+fakeServer.start();
+
+registerCleanupFunction(function () {
+ return promiseStopServer(fakeServer).finally(() => {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ });
+});
+
+const logsdir = FileUtils.getDir("ProfD", ["weave", "logs"]);
+logsdir.create(Ci.nsIFile.DIRECTORY_TYPE, FileUtils.PERMS_DIRECTORY);
+
+function removeLogFiles() {
+ let entries = logsdir.directoryEntries;
+ while (entries.hasMoreElements()) {
+ let logfile = entries.getNext().QueryInterface(Ci.nsIFile);
+ logfile.remove(false);
+ }
+}
+
+function getLogFiles() {
+ let result = [];
+ let entries = logsdir.directoryEntries;
+ while (entries.hasMoreElements()) {
+ result.push(entries.getNext().QueryInterface(Ci.nsIFile));
+ }
+ return result;
+}
+
+let engine;
+add_task(async function setup() {
+ await Service.engineManager.clear();
+ await Service.engineManager.register(EHTestsCommon.CatapultEngine);
+ engine = Service.engineManager.get("catapult");
+});
+
+async function clean() {
+ let promiseLogReset = promiseOneObserver("weave:service:reset-file-log");
+ await Service.startOver();
+ await promiseLogReset;
+ Status.resetSync();
+ Status.resetBackoff();
+ removeLogFiles();
+ // Move log levels back to trace (startOver will have reversed this).
+ syncTestLogging();
+}
+
+add_task(async function test_crypto_keys_login_server_maintenance_error() {
+ enableValidationPrefs();
+
+ Status.resetSync();
+ // Test crypto/keys server maintenance errors are not reported.
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+
+ await configureIdentity({ username: "broken.keys" }, server);
+
+ // Force re-download of keys
+ Service.collectionKeys.clear();
+
+ let backoffInterval;
+ Svc.Obs.add(
+ "weave:service:backoff:interval",
+ function observe(subject, data) {
+ Svc.Obs.remove("weave:service:backoff:interval", observe);
+ backoffInterval = subject;
+ }
+ );
+
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(Status.service, STATUS_OK);
+
+ let promiseObserved = promiseOneObserver("weave:service:reset-file-log");
+ await Service.sync();
+ await promiseObserved;
+
+ Assert.ok(Status.enforceBackoff);
+ Assert.equal(backoffInterval, 42);
+ Assert.equal(Status.service, LOGIN_FAILED);
+ Assert.equal(Status.login, SERVER_MAINTENANCE);
+
+ await clean();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_lastSync_not_updated_on_complete_failure() {
+ enableValidationPrefs();
+
+ // Test that lastSync is not updated when the sync fails completely.
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+
+ await configureIdentity({ username: "johndoe" }, server);
+
+ // Do an initial sync that we expect to be successful.
+ let promiseObserved = promiseOneObserver("weave:service:reset-file-log");
+ await sync_and_validate_telem();
+ await promiseObserved;
+
+ Assert.equal(Status.service, STATUS_OK);
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+
+ let lastSync = Svc.PrefBranch.getStringPref("lastSync");
+
+ Assert.ok(lastSync);
+
+ // Report server maintenance on info/collections requests
+ server.registerPathHandler(
+ "/1.1/johndoe/info/collections",
+ EHTestsCommon.service_unavailable
+ );
+
+ promiseObserved = promiseOneObserver("weave:service:reset-file-log");
+ await sync_and_validate_telem(() => {});
+ await promiseObserved;
+
+ Assert.equal(Status.sync, SERVER_MAINTENANCE);
+ Assert.equal(Status.service, SYNC_FAILED);
+
+ // We shouldn't update lastSync on complete failure.
+ Assert.equal(lastSync, Svc.PrefBranch.getStringPref("lastSync"));
+
+ await clean();
+ await promiseStopServer(server);
+});
+
+add_task(
+ async function test_sync_syncAndReportErrors_server_maintenance_error() {
+ enableValidationPrefs();
+
+ // Test server maintenance errors are reported
+ // when calling syncAndReportErrors.
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+
+ const BACKOFF = 42;
+ engine.enabled = true;
+ engine.exception = { status: 503, headers: { "retry-after": BACKOFF } };
+
+ Assert.equal(Status.service, STATUS_OK);
+
+ let promiseObserved = promiseOneObserver("weave:service:reset-file-log");
+ await Service.sync();
+ await promiseObserved;
+
+ Assert.equal(Status.service, SYNC_FAILED_PARTIAL);
+ Assert.equal(Status.sync, SERVER_MAINTENANCE);
+
+ await clean();
+ await promiseStopServer(server);
+ }
+);
+
+add_task(
+ async function test_info_collections_login_syncAndReportErrors_server_maintenance_error() {
+ enableValidationPrefs();
+
+ // Test info/collections server maintenance errors are reported
+ // when calling syncAndReportErrors.
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+
+ await configureIdentity({ username: "broken.info" }, server);
+
+ let backoffInterval;
+ Svc.Obs.add(
+ "weave:service:backoff:interval",
+ function observe(subject, data) {
+ Svc.Obs.remove("weave:service:backoff:interval", observe);
+ backoffInterval = subject;
+ }
+ );
+
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(Status.service, STATUS_OK);
+
+ let promiseObserved = promiseOneObserver("weave:service:reset-file-log");
+ await Service.sync();
+ await promiseObserved;
+
+ Assert.ok(Status.enforceBackoff);
+ Assert.equal(backoffInterval, 42);
+ Assert.equal(Status.service, LOGIN_FAILED);
+ Assert.equal(Status.login, SERVER_MAINTENANCE);
+
+ await clean();
+ await promiseStopServer(server);
+ }
+);
+
+add_task(
+ async function test_meta_global_login_syncAndReportErrors_server_maintenance_error() {
+ enableValidationPrefs();
+
+ // Test meta/global server maintenance errors are reported
+ // when calling syncAndReportErrors.
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+
+ await configureIdentity({ username: "broken.meta" }, server);
+
+ let backoffInterval;
+ Svc.Obs.add(
+ "weave:service:backoff:interval",
+ function observe(subject, data) {
+ Svc.Obs.remove("weave:service:backoff:interval", observe);
+ backoffInterval = subject;
+ }
+ );
+
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(Status.service, STATUS_OK);
+
+ let promiseObserved = promiseOneObserver("weave:service:reset-file-log");
+ await Service.sync();
+ await promiseObserved;
+
+ Assert.ok(Status.enforceBackoff);
+ Assert.equal(backoffInterval, 42);
+ Assert.equal(Status.service, LOGIN_FAILED);
+ Assert.equal(Status.login, SERVER_MAINTENANCE);
+
+ await clean();
+ await promiseStopServer(server);
+ }
+);
+
+add_task(
+ async function test_download_crypto_keys_login_syncAndReportErrors_server_maintenance_error() {
+ enableValidationPrefs();
+
+ // Test crypto/keys server maintenance errors are reported
+ // when calling syncAndReportErrors.
+ let server = await EHTestsCommon.sync_httpd_setup();
+ await EHTestsCommon.setUp(server);
+
+ await configureIdentity({ username: "broken.keys" }, server);
+ // Force re-download of keys
+ Service.collectionKeys.clear();
+
+ let backoffInterval;
+ Svc.Obs.add(
+ "weave:service:backoff:interval",
+ function observe(subject, data) {
+ Svc.Obs.remove("weave:service:backoff:interval", observe);
+ backoffInterval = subject;
+ }
+ );
+
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(Status.service, STATUS_OK);
+
+ let promiseObserved = promiseOneObserver("weave:service:reset-file-log");
+ await Service.sync();
+ await promiseObserved;
+
+ Assert.ok(Status.enforceBackoff);
+ Assert.equal(backoffInterval, 42);
+ Assert.equal(Status.service, LOGIN_FAILED);
+ Assert.equal(Status.login, SERVER_MAINTENANCE);
+
+ await clean();
+ await promiseStopServer(server);
+ }
+);
+
+add_task(
+ async function test_upload_crypto_keys_login_syncAndReportErrors_server_maintenance_error() {
+ enableValidationPrefs();
+
+ // Test crypto/keys server maintenance errors are reported
+ // when calling syncAndReportErrors.
+ let server = await EHTestsCommon.sync_httpd_setup();
+
+ // Start off with an empty account, do not upload a key.
+ await configureIdentity({ username: "broken.keys" }, server);
+
+ let backoffInterval;
+ Svc.Obs.add(
+ "weave:service:backoff:interval",
+ function observe(subject, data) {
+ Svc.Obs.remove("weave:service:backoff:interval", observe);
+ backoffInterval = subject;
+ }
+ );
+
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(Status.service, STATUS_OK);
+
+ let promiseObserved = promiseOneObserver("weave:service:reset-file-log");
+ await Service.sync();
+ await promiseObserved;
+
+ Assert.ok(Status.enforceBackoff);
+ Assert.equal(backoffInterval, 42);
+ Assert.equal(Status.service, LOGIN_FAILED);
+ Assert.equal(Status.login, SERVER_MAINTENANCE);
+
+ await clean();
+ await promiseStopServer(server);
+ }
+);
+
+add_task(
+ async function test_wipeServer_login_syncAndReportErrors_server_maintenance_error() {
+ enableValidationPrefs();
+
+ // Test wipeServer server maintenance errors are reported
+ // when calling syncAndReportErrors.
+ let server = await EHTestsCommon.sync_httpd_setup();
+
+ // Start off with an empty account, do not upload a key.
+ await configureIdentity({ username: "broken.wipe" }, server);
+
+ let backoffInterval;
+ Svc.Obs.add(
+ "weave:service:backoff:interval",
+ function observe(subject, data) {
+ Svc.Obs.remove("weave:service:backoff:interval", observe);
+ backoffInterval = subject;
+ }
+ );
+
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(Status.service, STATUS_OK);
+
+ let promiseObserved = promiseOneObserver("weave:service:reset-file-log");
+ await Service.sync();
+ await promiseObserved;
+
+ Assert.ok(Status.enforceBackoff);
+ Assert.equal(backoffInterval, 42);
+ Assert.equal(Status.service, LOGIN_FAILED);
+ Assert.equal(Status.login, SERVER_MAINTENANCE);
+
+ await clean();
+ await promiseStopServer(server);
+ }
+);
+
+add_task(
+ async function test_wipeRemote_syncAndReportErrors_server_maintenance_error() {
+ enableValidationPrefs();
+
+ // Test that we report server maintenance errors that occur while
+ // wiping all remote devices.
+ let server = await EHTestsCommon.sync_httpd_setup();
+
+ await configureIdentity({ username: "broken.wipe" }, server);
+ await EHTestsCommon.generateAndUploadKeys();
+
+ engine.exception = null;
+ engine.enabled = true;
+
+ let backoffInterval;
+ Svc.Obs.add(
+ "weave:service:backoff:interval",
+ function observe(subject, data) {
+ Svc.Obs.remove("weave:service:backoff:interval", observe);
+ backoffInterval = subject;
+ }
+ );
+
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(Status.service, STATUS_OK);
+
+ Svc.PrefBranch.setStringPref("firstSync", "wipeRemote");
+
+ let promiseObserved = promiseOneObserver("weave:service:reset-file-log");
+ await Service.sync();
+ await promiseObserved;
+
+ Assert.ok(Status.enforceBackoff);
+ Assert.equal(backoffInterval, 42);
+ Assert.equal(Status.service, SYNC_FAILED);
+ Assert.equal(Status.sync, SERVER_MAINTENANCE);
+ Assert.equal(Svc.PrefBranch.getStringPref("firstSync"), "wipeRemote");
+
+ await clean();
+ await promiseStopServer(server);
+ }
+);
+
+add_task(async function test_sync_engine_generic_fail() {
+ enableValidationPrefs();
+
+ equal(getLogFiles().length, 0);
+
+ let server = await EHTestsCommon.sync_httpd_setup();
+ engine.enabled = true;
+ engine.sync = async function sync() {
+ Svc.Obs.notify("weave:engine:sync:error", ENGINE_UNKNOWN_FAIL, "catapult");
+ };
+ let lastSync = Svc.PrefBranch.getStringPref("lastSync", null);
+ let log = Log.repository.getLogger("Sync.ErrorHandler");
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnError", true);
+
+ Assert.equal(Status.engines.catapult, undefined);
+
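+ // Wait for the engine to finish and then for the file log to be reset
+ // before inspecting Status and the files on disk.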
+ let promiseObserved = new Promise(res => {
+ Svc.Obs.add("weave:engine:sync:finish", function onEngineFinish() {
+ Svc.Obs.remove("weave:engine:sync:finish", onEngineFinish);
+
+ log.info("Adding reset-file-log observer.");
+ Svc.Obs.add("weave:service:reset-file-log", function onResetFileLog() {
+ Svc.Obs.remove("weave:service:reset-file-log", onResetFileLog);
+ res();
+ });
+ });
+ });
+
+ Assert.ok(await EHTestsCommon.setUp(server));
+ await sync_and_validate_telem(ping => {
+ deepEqual(ping.status.service, SYNC_FAILED_PARTIAL);
+ deepEqual(ping.engines.find(e => e.status).status, ENGINE_UNKNOWN_FAIL);
+ });
+
+ await promiseObserved;
+
+ _("Status.engines: " + JSON.stringify(Status.engines));
+ Assert.equal(Status.engines.catapult, ENGINE_UNKNOWN_FAIL);
+ Assert.equal(Status.service, SYNC_FAILED_PARTIAL);
+
+ // lastSync should update on partial failure.
+ Assert.notEqual(lastSync, Svc.PrefBranch.getStringPref("lastSync"));
+
+ // Test Error log was written on SYNC_FAILED_PARTIAL.
+ let logFiles = getLogFiles();
+ equal(logFiles.length, 1);
+ Assert.ok(
+ logFiles[0].leafName.startsWith("error-sync-"),
+ logFiles[0].leafName
+ );
+
+ await clean();
+
+ await promiseStopServer(server);
+});
+
+add_task(async function test_logs_on_sync_error() {
+ enableValidationPrefs();
+
+ _(
+ "Ensure that an error is still logged when weave:service:sync:error " +
+ "is notified, despite shouldReportError returning false."
+ );
+
+ let log = Log.repository.getLogger("Sync.ErrorHandler");
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnError", true);
+ log.info("TESTING");
+
+ // Ensure that we report no error.
+ Status.login = MASTER_PASSWORD_LOCKED;
+
+ let promiseObserved = promiseOneObserver("weave:service:reset-file-log");
+ Svc.Obs.notify("weave:service:sync:error", {});
+ await promiseObserved;
+
+ // Test that error log was written.
+ let logFiles = getLogFiles();
+ equal(logFiles.length, 1);
+ Assert.ok(
+ logFiles[0].leafName.startsWith("error-sync-"),
+ logFiles[0].leafName
+ );
+
+ await clean();
+});
+
+add_task(async function test_logs_on_login_error() {
+ enableValidationPrefs();
+
+ _(
+ "Ensure that an error is still logged when weave:service:login:error " +
+ "is notified, despite shouldReportError returning false."
+ );
+
+ let log = Log.repository.getLogger("Sync.ErrorHandler");
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnError", true);
+ log.info("TESTING");
+
+ // Ensure that we report no error.
+ Status.login = MASTER_PASSWORD_LOCKED;
+
+ let promiseObserved = promiseOneObserver("weave:service:reset-file-log");
+ Svc.Obs.notify("weave:service:login:error", {});
+ await promiseObserved;
+
+ // Test that error log was written.
+ let logFiles = getLogFiles();
+ equal(logFiles.length, 1);
+ Assert.ok(
+ logFiles[0].leafName.startsWith("error-sync-"),
+ logFiles[0].leafName
+ );
+
+ await clean();
+});
+
+// This test should be the last one since it monkeypatches the engine object
+// and we should only have one engine object throughout the file (bug 629664).
+add_task(async function test_engine_applyFailed() {
+ enableValidationPrefs();
+
+ let server = await EHTestsCommon.sync_httpd_setup();
+
+ engine.enabled = true;
+ delete engine.exception;
+ engine.sync = async function sync() {
+ Svc.Obs.notify("weave:engine:sync:applied", { newFailed: 1 }, "catapult");
+ };
+
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnError", true);
+
+ let promiseObserved = promiseOneObserver("weave:service:reset-file-log");
+
+ Assert.equal(Status.engines.catapult, undefined);
+ Assert.ok(await EHTestsCommon.setUp(server));
+ await Service.sync();
+ await promiseObserved;
+
+ Assert.equal(Status.engines.catapult, ENGINE_APPLY_FAIL);
+ Assert.equal(Status.service, SYNC_FAILED_PARTIAL);
+
+ // Test Error log was written on SYNC_FAILED_PARTIAL.
+ let logFiles = getLogFiles();
+ equal(logFiles.length, 1);
+ Assert.ok(
+ logFiles[0].leafName.startsWith("error-sync-"),
+ logFiles[0].leafName
+ );
+
+ await clean();
+ await promiseStopServer(server);
+});
diff --git a/services/sync/tests/unit/test_errorhandler_filelog.js b/services/sync/tests/unit/test_errorhandler_filelog.js
new file mode 100644
index 0000000000..66260b3f59
--- /dev/null
+++ b/services/sync/tests/unit/test_errorhandler_filelog.js
@@ -0,0 +1,473 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// `Service` is used as a global in head_helpers.js.
+// eslint-disable-next-line no-unused-vars
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { logManager } = ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccountsCommon.sys.mjs"
+);
+const { FileUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/FileUtils.sys.mjs"
+);
+
+const logsdir = FileUtils.getDir("ProfD", ["weave", "logs"]);
+logsdir.create(Ci.nsIFile.DIRECTORY_TYPE, FileUtils.PERMS_DIRECTORY);
+
+// Delay to wait before cleanup, to allow files to age.
+// This is so large because the file timestamp granularity is per-second, and
+// so otherwise we can end up with all of our files -- the ones we want to
+// keep, and the ones we want to clean up -- having the same modified time.
+const CLEANUP_DELAY = 2000;
+const DELAY_BUFFER = 500; // Buffer for timers on different OS platforms.
+
+function run_test() {
+ validate_all_future_pings();
+ run_next_test();
+}
+
+add_test(function test_noOutput() {
+ // Ensure that the log appender won't print anything.
+ logManager._fileAppender.level = Log.Level.Fatal + 1;
+
+ // Clear log output from startup.
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnSuccess", false);
+ Svc.Obs.notify("weave:service:sync:finish");
+ Svc.Obs.add("weave:service:reset-file-log", function onResetFileLogOuter() {
+ Svc.Obs.remove("weave:service:reset-file-log", onResetFileLogOuter);
+ // Clear again without having issued any output.
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnSuccess", true);
+
+ Svc.Obs.add("weave:service:reset-file-log", function onResetFileLogInner() {
+ Svc.Obs.remove("weave:service:reset-file-log", onResetFileLogInner);
+
+ logManager._fileAppender.level = Log.Level.Trace;
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ run_next_test();
+ });
+
+ // Fake a successful sync.
+ Svc.Obs.notify("weave:service:sync:finish");
+ });
+});
+
+add_test(function test_logOnSuccess_false() {
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnSuccess", false);
+
+ let log = Log.repository.getLogger("Sync.Test.FileLog");
+ log.info("this won't show up");
+
+ Svc.Obs.add("weave:service:reset-file-log", function onResetFileLog() {
+ Svc.Obs.remove("weave:service:reset-file-log", onResetFileLog);
+ // No log file was written.
+ Assert.ok(!logsdir.directoryEntries.hasMoreElements());
+
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ run_next_test();
+ });
+
+ // Fake a successful sync.
+ Svc.Obs.notify("weave:service:sync:finish");
+});
+
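+// Asynchronously read `file` and pass (statusCode, contents) to `callback`.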
+function readFile(file, callback) {
+ NetUtil.asyncFetch(
+ {
+ uri: NetUtil.newURI(file),
+ loadUsingSystemPrincipal: true,
+ },
+ function (inputStream, statusCode, request) {
+ let data = NetUtil.readInputStreamToString(
+ inputStream,
+ inputStream.available()
+ );
+ callback(statusCode, data);
+ }
+ );
+}
+
+add_test(function test_logOnSuccess_true() {
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnSuccess", true);
+
+ let log = Log.repository.getLogger("Sync.Test.FileLog");
+ const MESSAGE = "this WILL show up";
+ log.info(MESSAGE);
+
+ Svc.Obs.add("weave:service:reset-file-log", function onResetFileLog() {
+ Svc.Obs.remove("weave:service:reset-file-log", onResetFileLog);
+
+ // Exactly one log file was written.
+ let entries = logsdir.directoryEntries;
+ Assert.ok(entries.hasMoreElements());
+ let logfile = entries.getNext().QueryInterface(Ci.nsIFile);
+ Assert.equal(logfile.leafName.slice(-4), ".txt");
+ Assert.ok(logfile.leafName.startsWith("success-sync-"), logfile.leafName);
+ Assert.ok(!entries.hasMoreElements());
+
+ // Ensure the log message was actually written to file.
+ readFile(logfile, function (error, data) {
+ Assert.ok(Components.isSuccessCode(error));
+ Assert.notEqual(data.indexOf(MESSAGE), -1);
+
+ // Clean up.
+ try {
+ logfile.remove(false);
+ } catch (ex) {
+ dump("Couldn't delete file: " + ex.message + "\n");
+ // Stupid Windows box.
+ }
+
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ run_next_test();
+ });
+ });
+
+ // Fake a successful sync.
+ Svc.Obs.notify("weave:service:sync:finish");
+});
+
+add_test(function test_sync_error_logOnError_false() {
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnError", false);
+
+ let log = Log.repository.getLogger("Sync.Test.FileLog");
+ log.info("this won't show up");
+
+ Svc.Obs.add("weave:service:reset-file-log", function onResetFileLog() {
+ Svc.Obs.remove("weave:service:reset-file-log", onResetFileLog);
+ // No log file was written.
+ Assert.ok(!logsdir.directoryEntries.hasMoreElements());
+
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ run_next_test();
+ });
+
+ // Fake an unsuccessful sync.
+ Svc.Obs.notify("weave:service:sync:error");
+});
+
+add_test(function test_sync_error_logOnError_true() {
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnError", true);
+
+ let log = Log.repository.getLogger("Sync.Test.FileLog");
+ const MESSAGE = "this WILL show up";
+ log.info(MESSAGE);
+
+ Svc.Obs.add("weave:service:reset-file-log", function onResetFileLog() {
+ Svc.Obs.remove("weave:service:reset-file-log", onResetFileLog);
+
+ // Exactly one log file was written.
+ let entries = logsdir.directoryEntries;
+ Assert.ok(entries.hasMoreElements());
+ let logfile = entries.getNext().QueryInterface(Ci.nsIFile);
+ Assert.equal(logfile.leafName.slice(-4), ".txt");
+ Assert.ok(logfile.leafName.startsWith("error-sync-"), logfile.leafName);
+ Assert.ok(!entries.hasMoreElements());
+
+ // Ensure the log message was actually written to file.
+ readFile(logfile, function (error, data) {
+ Assert.ok(Components.isSuccessCode(error));
+ Assert.notEqual(data.indexOf(MESSAGE), -1);
+
+ // Clean up.
+ try {
+ logfile.remove(false);
+ } catch (ex) {
+ dump("Couldn't delete file: " + ex.message + "\n");
+ // Stupid Windows box.
+ }
+
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ run_next_test();
+ });
+ });
+
+ // Fake an unsuccessful sync.
+ Svc.Obs.notify("weave:service:sync:error");
+});
+
+add_test(function test_login_error_logOnError_false() {
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnError", false);
+
+ let log = Log.repository.getLogger("Sync.Test.FileLog");
+ log.info("this won't show up");
+
+ Svc.Obs.add("weave:service:reset-file-log", function onResetFileLog() {
+ Svc.Obs.remove("weave:service:reset-file-log", onResetFileLog);
+ // No log file was written.
+ Assert.ok(!logsdir.directoryEntries.hasMoreElements());
+
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ run_next_test();
+ });
+
+ // Fake an unsuccessful login.
+ Svc.Obs.notify("weave:service:login:error");
+});
+
+add_test(function test_login_error_logOnError_true() {
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnError", true);
+
+ let log = Log.repository.getLogger("Sync.Test.FileLog");
+ const MESSAGE = "this WILL show up";
+ log.info(MESSAGE);
+
+ Svc.Obs.add("weave:service:reset-file-log", function onResetFileLog() {
+ Svc.Obs.remove("weave:service:reset-file-log", onResetFileLog);
+
+ // Exactly one log file was written.
+ let entries = logsdir.directoryEntries;
+ Assert.ok(entries.hasMoreElements());
+ let logfile = entries.getNext().QueryInterface(Ci.nsIFile);
+ Assert.equal(logfile.leafName.slice(-4), ".txt");
+ Assert.ok(logfile.leafName.startsWith("error-sync-"), logfile.leafName);
+ Assert.ok(!entries.hasMoreElements());
+
+ // Ensure the log message was actually written to file.
+ readFile(logfile, function (error, data) {
+ Assert.ok(Components.isSuccessCode(error));
+ Assert.notEqual(data.indexOf(MESSAGE), -1);
+
+ // Clean up.
+ try {
+ logfile.remove(false);
+ } catch (ex) {
+ dump("Couldn't delete file: " + ex.message + "\n");
+ // Stupid Windows box.
+ }
+
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ run_next_test();
+ });
+ });
+
+ // Fake an unsuccessful login.
+ Svc.Obs.notify("weave:service:login:error");
+});
+
+add_test(function test_noNewFailed_noErrorLog() {
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnError", true);
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnSuccess", false);
+
+ Svc.Obs.add("weave:service:reset-file-log", function onResetFileLog() {
+ Svc.Obs.remove("weave:service:reset-file-log", onResetFileLog);
+ // No log file was written.
+ Assert.ok(!logsdir.directoryEntries.hasMoreElements());
+
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ run_next_test();
+ });
+ // failed is nonzero and newFailed is zero -- shouldn't write a log.
+ let count = {
+ applied: 8,
+ succeeded: 4,
+ failed: 5,
+ newFailed: 0,
+ reconciled: 4,
+ };
+ Svc.Obs.notify("weave:engine:sync:applied", count, "foobar-engine");
+ Svc.Obs.notify("weave:service:sync:finish");
+});
+
+add_test(function test_newFailed_errorLog() {
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnError", true);
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnSuccess", false);
+
+ let log = Log.repository.getLogger("Sync.Test.FileLog");
+ const MESSAGE = "this WILL show up 2";
+ log.info(MESSAGE);
+
+ Svc.Obs.add("weave:service:reset-file-log", function onResetFileLog() {
+ Svc.Obs.remove("weave:service:reset-file-log", onResetFileLog);
+
+ // Exactly one log file was written.
+ let entries = logsdir.directoryEntries;
+ Assert.ok(entries.hasMoreElements());
+ let logfile = entries.getNext().QueryInterface(Ci.nsIFile);
+ Assert.equal(logfile.leafName.slice(-4), ".txt");
+ Assert.ok(logfile.leafName.startsWith("error-sync-"), logfile.leafName);
+ Assert.ok(!entries.hasMoreElements());
+
+ // Ensure the log message was actually written to file.
+ readFile(logfile, function (error, data) {
+ Assert.ok(Components.isSuccessCode(error));
+ Assert.notEqual(data.indexOf(MESSAGE), -1);
+
+ // Clean up.
+ try {
+ logfile.remove(false);
+ } catch (ex) {
+ dump("Couldn't delete file: " + ex.message + "\n");
+ // Stupid Windows box.
+ }
+
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ run_next_test();
+ });
+ });
+ // newFailed is nonzero -- should write a log.
+ let count = {
+ applied: 8,
+ succeeded: 4,
+ failed: 5,
+ newFailed: 4,
+ reconciled: 4,
+ };
+
+ Svc.Obs.notify("weave:engine:sync:applied", count, "foobar-engine");
+ Svc.Obs.notify("weave:service:sync:finish");
+});
+
+add_test(function test_errorLog_dumpAddons() {
+ Svc.PrefBranch.setStringPref("log.logger", "Trace");
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnError", true);
+
+ Svc.Obs.add("weave:service:reset-file-log", function onResetFileLog() {
+ Svc.Obs.remove("weave:service:reset-file-log", onResetFileLog);
+
+ let entries = logsdir.directoryEntries;
+ Assert.ok(entries.hasMoreElements());
+ let logfile = entries.getNext().QueryInterface(Ci.nsIFile);
+ Assert.equal(logfile.leafName.slice(-4), ".txt");
+ Assert.ok(logfile.leafName.startsWith("error-sync-"), logfile.leafName);
+ Assert.ok(!entries.hasMoreElements());
+
+ // Ensure we logged some addon list (which is probably empty)
+ readFile(logfile, function (error, data) {
+ Assert.ok(Components.isSuccessCode(error));
+ Assert.notEqual(data.indexOf("Addons installed"), -1);
+
+ // Clean up.
+ try {
+ logfile.remove(false);
+ } catch (ex) {
+ dump("Couldn't delete file: " + ex.message + "\n");
+ // Stupid Windows box.
+ }
+
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ run_next_test();
+ });
+ });
+
+ // Fake an unsuccessful sync.
+ Svc.Obs.notify("weave:service:sync:error");
+});
+
+// Check that error log files are deleted above an age threshold.
+add_test(async function test_logErrorCleanup_age() {
+ _("Beginning test_logErrorCleanup_age.");
+ let maxAge = CLEANUP_DELAY / 1000;
+ let oldLogs = [];
+ let numLogs = 10;
+ let errString = "some error log\n";
+
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnError", true);
+ Svc.PrefBranch.setIntPref("log.appender.file.maxErrorAge", maxAge);
+
+ _("Making some files.");
+ const logsDir = PathUtils.join(PathUtils.profileDir, "weave", "logs");
+ await IOUtils.makeDirectory(logsDir);
+ for (let i = 0; i < numLogs; i++) {
+ let now = Date.now();
+ let filename = "error-sync-" + now + "" + i + ".txt";
+ let newLog = new FileUtils.File(PathUtils.join(logsDir, filename));
+ let foStream = FileUtils.openFileOutputStream(newLog);
+ foStream.write(errString, errString.length);
+ foStream.close();
+ _(" > Created " + filename);
+ oldLogs.push(newLog.leafName);
+ }
+
+ Svc.Obs.add(
+ "services-tests:common:log-manager:cleanup-logs",
+ function onCleanupLogs() {
+ Svc.Obs.remove(
+ "services-tests:common:log-manager:cleanup-logs",
+ onCleanupLogs
+ );
+
+ // Only the newest created log file remains.
+ let entries = logsdir.directoryEntries;
+ Assert.ok(entries.hasMoreElements());
+ let logfile = entries.getNext().QueryInterface(Ci.nsIFile);
+ Assert.ok(
+ oldLogs.every(function (e) {
+ return e != logfile.leafName;
+ })
+ );
+ Assert.ok(!entries.hasMoreElements());
+
+ // Clean up.
+ try {
+ logfile.remove(false);
+ } catch (ex) {
+ dump("Couldn't delete file: " + ex.message + "\n");
+ // Stupid Windows box.
+ }
+
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ run_next_test();
+ }
+ );
+
+ let delay = CLEANUP_DELAY + DELAY_BUFFER;
+
+ _("Cleaning up logs after " + delay + "msec.");
+ CommonUtils.namedTimer(
+ function onTimer() {
+ Svc.Obs.notify("weave:service:sync:error");
+ },
+ delay,
+ this,
+ "cleanup-timer"
+ );
+});
+
+add_task(async function test_remove_log_on_startOver() {
+ Svc.PrefBranch.setBoolPref("log.appender.file.logOnError", true);
+
+ let log = Log.repository.getLogger("Sync.Test.FileLog");
+ const MESSAGE = "this WILL show up";
+ log.info(MESSAGE);
+
+ let promiseLogWritten = promiseOneObserver("weave:service:reset-file-log");
+ // Fake an unsuccessful sync.
+ Svc.Obs.notify("weave:service:sync:error");
+
+ await promiseLogWritten;
+ // Should have at least 1 log file.
+ let entries = logsdir.directoryEntries;
+ Assert.ok(entries.hasMoreElements());
+
+ // Fake a reset.
+ let promiseRemoved = promiseOneObserver("weave:service:remove-file-log");
+ Svc.Obs.notify("weave:service:start-over:finish");
+ await promiseRemoved;
+
+ // There should be no files left.
+ Assert.ok(!logsdir.directoryEntries.hasMoreElements());
+});
diff --git a/services/sync/tests/unit/test_errorhandler_sync_checkServerError.js b/services/sync/tests/unit/test_errorhandler_sync_checkServerError.js
new file mode 100644
index 0000000000..d73d548cc7
--- /dev/null
+++ b/services/sync/tests/unit/test_errorhandler_sync_checkServerError.js
@@ -0,0 +1,294 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { Status } = ChromeUtils.importESModule(
+ "resource://services-sync/status.sys.mjs"
+);
+const { FakeCryptoService } = ChromeUtils.importESModule(
+ "resource://testing-common/services/sync/fakeservices.sys.mjs"
+);
+
+var engineManager = Service.engineManager;
+
+function CatapultEngine() {
+ SyncEngine.call(this, "Catapult", Service);
+}
+CatapultEngine.prototype = {
+ exception: null, // tests fill this in
+ async _sync() {
+ throw this.exception;
+ },
+};
+Object.setPrototypeOf(CatapultEngine.prototype, SyncEngine.prototype);
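+
+// Each task below sets `engine.exception` to simulate a particular failure
+// (an HTTP-status-like object or a Components.Exception) thrown from _sync().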
+
+async function sync_httpd_setup() {
+ let collectionsHelper = track_collections_helper();
+ let upd = collectionsHelper.with_updated_collection;
+
+ let catapultEngine = engineManager.get("catapult");
+ let syncID = await catapultEngine.resetLocalSyncID();
+ let engines = { catapult: { version: catapultEngine.version, syncID } };
+
+ // Track these using the collections helper, which keeps modified times
+ // up-to-date.
+ let clientsColl = new ServerCollection({}, true);
+ let keysWBO = new ServerWBO("keys");
+ let globalWBO = new ServerWBO("global", {
+ storageVersion: STORAGE_VERSION,
+ syncID: Utils.makeGUID(),
+ engines,
+ });
+
+ let handlers = {
+ "/1.1/johndoe/info/collections": collectionsHelper.handler,
+ "/1.1/johndoe/storage/meta/global": upd("meta", globalWBO.handler()),
+ "/1.1/johndoe/storage/clients": upd("clients", clientsColl.handler()),
+ "/1.1/johndoe/storage/crypto/keys": upd("crypto", keysWBO.handler()),
+ };
+ return httpd_setup(handlers);
+}
+
+async function setUp(server) {
+ await configureIdentity({ username: "johndoe" }, server);
+ new FakeCryptoService();
+ syncTestLogging();
+}
+
+async function generateAndUploadKeys(server) {
+ await generateNewKeys(Service.collectionKeys);
+ let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
+ await serverKeys.encrypt(Service.identity.syncKeyBundle);
+ let res = Service.resource(
+ server.baseURI + "/1.1/johndoe/storage/crypto/keys"
+ );
+ return (await serverKeys.upload(res)).success;
+}
+
+add_task(async function setup() {
+ await engineManager.clear();
+ validate_all_future_pings();
+ await engineManager.register(CatapultEngine);
+});
+
+add_task(async function test_backoff500() {
+ enableValidationPrefs();
+
+ _("Test: HTTP 500 sets backoff status.");
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ let engine = engineManager.get("catapult");
+ engine.enabled = true;
+ engine.exception = { status: 500 };
+
+ try {
+ Assert.ok(!Status.enforceBackoff);
+
+ // Forcibly create and upload keys here -- otherwise we don't get to the 500!
+ Assert.ok(await generateAndUploadKeys(server));
+
+ await Service.login();
+ await Service.sync();
+ Assert.ok(Status.enforceBackoff);
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+ Assert.equal(Status.service, SYNC_FAILED_PARTIAL);
+ } finally {
+ Status.resetBackoff();
+ await Service.startOver();
+ }
+ await promiseStopServer(server);
+});
+
+add_task(async function test_backoff503() {
+ enableValidationPrefs();
+
+ _(
+ "Test: HTTP 503 with Retry-After header leads to backoff notification and sets backoff status."
+ );
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ const BACKOFF = 42;
+ let engine = engineManager.get("catapult");
+ engine.enabled = true;
+ engine.exception = { status: 503, headers: { "retry-after": BACKOFF } };
+
+ let backoffInterval;
+ Svc.Obs.add("weave:service:backoff:interval", function (subject) {
+ backoffInterval = subject;
+ });
+
+ try {
+ Assert.ok(!Status.enforceBackoff);
+
+ Assert.ok(await generateAndUploadKeys(server));
+
+ await Service.login();
+ await Service.sync();
+
+ Assert.ok(Status.enforceBackoff);
+ Assert.equal(backoffInterval, BACKOFF);
+ Assert.equal(Status.service, SYNC_FAILED_PARTIAL);
+ Assert.equal(Status.sync, SERVER_MAINTENANCE);
+ } finally {
+ Status.resetBackoff();
+ Status.resetSync();
+ await Service.startOver();
+ }
+ await promiseStopServer(server);
+});
+
+add_task(async function test_overQuota() {
+ enableValidationPrefs();
+
+ _("Test: HTTP 400 with body error code 14 means over quota.");
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ let engine = engineManager.get("catapult");
+ engine.enabled = true;
+ engine.exception = {
+ status: 400,
+ toString() {
+ return "14";
+ },
+ };
+
+ try {
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+
+ Assert.ok(await generateAndUploadKeys(server));
+
+ await Service.login();
+ await Service.sync();
+
+ Assert.equal(Status.sync, OVER_QUOTA);
+ Assert.equal(Status.service, SYNC_FAILED_PARTIAL);
+ } finally {
+ Status.resetSync();
+ await Service.startOver();
+ }
+ await promiseStopServer(server);
+});
+
+add_task(async function test_service_networkError() {
+ enableValidationPrefs();
+
+ _(
+ "Test: Connection refused error from Service.sync() leads to the right status code."
+ );
+ let server = await sync_httpd_setup();
+ await setUp(server);
+ await promiseStopServer(server);
+ // Provoke connection refused.
+ Service.clusterURL = "http://localhost:12345/";
+
+ try {
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+
+ Service._loggedIn = true;
+ await Service.sync();
+
+ Assert.equal(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
+ Assert.equal(Status.service, SYNC_FAILED);
+ } finally {
+ Status.resetSync();
+ await Service.startOver();
+ }
+});
+
+add_task(async function test_service_offline() {
+ enableValidationPrefs();
+
+  _(
+    "Test: Attempting to sync in offline mode leads to the right status code but does not increment the ignorable error count."
+  );
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ await promiseStopServer(server);
+ Services.io.offline = true;
+ Services.prefs.setBoolPref("network.dns.offline-localhost", false);
+
+ try {
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+
+ Service._loggedIn = true;
+ await Service.sync();
+
+ Assert.equal(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
+ Assert.equal(Status.service, SYNC_FAILED);
+ } finally {
+ Status.resetSync();
+ await Service.startOver();
+ }
+ Services.io.offline = false;
+ Services.prefs.clearUserPref("network.dns.offline-localhost");
+});
+
+add_task(async function test_engine_networkError() {
+ enableValidationPrefs();
+
+ _(
+ "Test: Network related exceptions from engine.sync() lead to the right status code."
+ );
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ let engine = engineManager.get("catapult");
+ engine.enabled = true;
+ engine.exception = Components.Exception(
+ "NS_ERROR_UNKNOWN_HOST",
+ Cr.NS_ERROR_UNKNOWN_HOST
+ );
+
+ try {
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+
+ Assert.ok(await generateAndUploadKeys(server));
+
+ await Service.login();
+ await Service.sync();
+
+ Assert.equal(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
+ Assert.equal(Status.service, SYNC_FAILED_PARTIAL);
+ } finally {
+ Status.resetSync();
+ await Service.startOver();
+ }
+ await promiseStopServer(server);
+});
+
+add_task(async function test_resource_timeout() {
+ enableValidationPrefs();
+
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ let engine = engineManager.get("catapult");
+ engine.enabled = true;
+ // Resource throws this when it encounters a timeout.
+ engine.exception = Components.Exception(
+ "Aborting due to channel inactivity.",
+ Cr.NS_ERROR_NET_TIMEOUT
+ );
+
+ try {
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+
+ Assert.ok(await generateAndUploadKeys(server));
+
+ await Service.login();
+ await Service.sync();
+
+ Assert.equal(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
+ Assert.equal(Status.service, SYNC_FAILED_PARTIAL);
+ } finally {
+ Status.resetSync();
+ await Service.startOver();
+ }
+ await promiseStopServer(server);
+});
diff --git a/services/sync/tests/unit/test_extension_storage_engine.js b/services/sync/tests/unit/test_extension_storage_engine.js
new file mode 100644
index 0000000000..a061812aca
--- /dev/null
+++ b/services/sync/tests/unit/test_extension_storage_engine.js
@@ -0,0 +1,275 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+ChromeUtils.defineESModuleGetters(this, {
+ Service: "resource://services-sync/service.sys.mjs",
+ extensionStorageSync: "resource://gre/modules/ExtensionStorageSync.sys.mjs",
+});
+
+const { ExtensionStorageEngineBridge, ExtensionStorageEngineKinto } =
+ ChromeUtils.importESModule(
+ "resource://services-sync/engines/extension-storage.sys.mjs"
+ );
+
+const { BridgeWrapperXPCOM } = ChromeUtils.importESModule(
+ "resource://services-sync/bridged_engine.sys.mjs"
+);
+
+Services.prefs.setStringPref("webextensions.storage.sync.log.level", "debug");
+
+add_task(async function test_switching_between_kinto_and_bridged() {
+ function assertUsingKinto(message) {
+ let kintoEngine = Service.engineManager.get("extension-storage");
+ Assert.ok(kintoEngine instanceof ExtensionStorageEngineKinto, message);
+ }
+ function assertUsingBridged(message) {
+ let bridgedEngine = Service.engineManager.get("extension-storage");
+ Assert.ok(bridgedEngine instanceof ExtensionStorageEngineBridge, message);
+ }
+
+ let isUsingKinto = Services.prefs.getBoolPref(
+ "webextensions.storage.sync.kinto",
+ false
+ );
+ if (isUsingKinto) {
+ assertUsingKinto("Should use Kinto engine before flipping pref");
+ } else {
+ assertUsingBridged("Should use bridged engine before flipping pref");
+ }
+
+ _("Flip pref");
+ Services.prefs.setBoolPref("webextensions.storage.sync.kinto", !isUsingKinto);
+ await Service.engineManager.switchAlternatives();
+
+ if (isUsingKinto) {
+ assertUsingBridged("Should use bridged engine after flipping pref");
+ } else {
+ assertUsingKinto("Should use Kinto engine after flipping pref");
+ }
+
+ _("Clean up");
+ Services.prefs.clearUserPref("webextensions.storage.sync.kinto");
+ await Service.engineManager.switchAlternatives();
+});
+
+add_task(async function test_enable() {
+ const PREF = "services.sync.engine.extension-storage.force";
+
+ let addonsEngine = Service.engineManager.get("addons");
+ let extensionStorageEngine = Service.engineManager.get("extension-storage");
+
+ try {
+ Assert.ok(
+ addonsEngine.enabled,
+ "Add-ons engine should be enabled by default"
+ );
+ Assert.ok(
+ extensionStorageEngine.enabled,
+ "Extension storage engine should be enabled by default"
+ );
+
+ addonsEngine.enabled = false;
+ Assert.ok(
+ !extensionStorageEngine.enabled,
+ "Disabling add-ons should disable extension storage"
+ );
+
+ extensionStorageEngine.enabled = true;
+ Assert.ok(
+ !extensionStorageEngine.enabled,
+ "Enabling extension storage without override pref shouldn't work"
+ );
+
+ Services.prefs.setBoolPref(PREF, true);
+ Assert.ok(
+ extensionStorageEngine.enabled,
+ "Setting override pref should enable extension storage"
+ );
+
+ extensionStorageEngine.enabled = false;
+ Assert.ok(
+ !extensionStorageEngine.enabled,
+ "Disabling extension storage engine with override pref should work"
+ );
+
+ extensionStorageEngine.enabled = true;
+ Assert.ok(
+ extensionStorageEngine.enabled,
+ "Enabling extension storage with override pref should work"
+ );
+ } finally {
+ addonsEngine.enabled = true;
+ Services.prefs.clearUserPref(PREF);
+ }
+});
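+
+// A rough sketch of the gating exercised in test_enable above (an illustrative
+// assumption, not the real implementation in extension-storage.sys.mjs):
+// without the override pref the engine mirrors the add-ons engine and ignores
+// writes to `enabled`; with the pref set, `enabled` behaves like a normal flag:
+//
+//   get enabled() {
+//     if (!Services.prefs.getBoolPref(PREF, false)) {
+//       return Service.engineManager.get("addons").enabled;
+//     }
+//     return this._enabled;
+//   }
+//   set enabled(flag) {
+//     if (Services.prefs.getBoolPref(PREF, false)) {
+//       this._enabled = flag;
+//     }
+//   }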
+
+add_task(async function test_notifyPendingChanges() {
+ let engine = new ExtensionStorageEngineBridge(Service);
+
+ let extension = { id: "ext-1" };
+ let expectedChange = {
+ a: "b",
+ c: "d",
+ };
+
+ let lastSync = 0;
+ let syncID = Utils.makeGUID();
+ let error = null;
+ engine.component = {
+ QueryInterface: ChromeUtils.generateQI([
+ "mozIBridgedSyncEngine",
+ "mozIExtensionStorageArea",
+ "mozISyncedExtensionStorageArea",
+ ]),
+ ensureCurrentSyncId(id, callback) {
+ if (syncID != id) {
+ syncID = id;
+ lastSync = 0;
+ }
+ callback.handleSuccess(id);
+ },
+ resetSyncId(callback) {
+ callback.handleSuccess(syncID);
+ },
+ syncStarted(callback) {
+ callback.handleSuccess();
+ },
+ getLastSync(callback) {
+ callback.handleSuccess(lastSync);
+ },
+ setLastSync(lastSyncMillis, callback) {
+ lastSync = lastSyncMillis;
+ callback.handleSuccess();
+ },
+ apply(callback) {
+ callback.handleSuccess([]);
+ },
+ fetchPendingSyncChanges(callback) {
+ if (error) {
+ callback.handleError(Cr.NS_ERROR_FAILURE, error.message);
+ } else {
+ callback.onChanged(extension.id, JSON.stringify(expectedChange));
+ callback.handleSuccess();
+ }
+ },
+ setUploaded(modified, ids, callback) {
+ callback.handleSuccess();
+ },
+ syncFinished(callback) {
+ callback.handleSuccess();
+ },
+ takeMigrationInfo(callback) {
+ callback.handleSuccess(null);
+ },
+ };
+
+ engine._bridge = new BridgeWrapperXPCOM(engine.component);
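+  // BridgeWrapperXPCOM is assumed to adapt the component's callback-style
+  // methods into the promise-based interface the bridged engine expects,
+  // roughly like this (illustrative sketch only):
+  //
+  //   function promisify(method, ...args) {
+  //     return new Promise((resolve, reject) => {
+  //       method(...args, {
+  //         handleSuccess: resolve,
+  //         handleError: (code, msg) => reject(new Error(msg)),
+  //       });
+  //     });
+  //   }
+  //   // e.g. `await promisify(component.getLastSync)` resolves with lastSync.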
+
+ let server = await serverForFoo(engine);
+
+ let actualChanges = [];
+ let listener = changes => actualChanges.push(changes);
+ extensionStorageSync.addOnChangedListener(extension, listener);
+
+ try {
+ await SyncTestingInfrastructure(server);
+
+ info("Sync engine; notify about changes");
+ await sync_engine_and_validate_telem(engine, false);
+ deepEqual(
+ actualChanges,
+ [expectedChange],
+ "Should notify about changes during sync"
+ );
+
+ error = new Error("oops!");
+ actualChanges = [];
+ await sync_engine_and_validate_telem(engine, false);
+ deepEqual(
+ actualChanges,
+ [],
+ "Should finish syncing even if notifying about changes fails"
+ );
+ } finally {
+ extensionStorageSync.removeOnChangedListener(extension, listener);
+ await promiseStopServer(server);
+ await engine.finalize();
+ }
+});
+
+// It's difficult to know what to test here - the bridged engine already has
+// tests of its own - so we just check that this engine conforms to the
+// mozIBridgedSyncEngine interface guarantees.
+add_task(async function test_engine() {
+  // Forcibly set the bridged engine in the engine manager. The reason we do
+  // this, unlike the other tests where we just create the engine, is so that
+  // telemetry can get at the engine's `overrideTelemetryName`, which it gets
+  // through the engine manager.
+ await Service.engineManager.unregister("extension-storage");
+ await Service.engineManager.register(ExtensionStorageEngineBridge);
+ let engine = Service.engineManager.get("extension-storage");
+ Assert.equal(engine.version, 1);
+
+ Assert.deepEqual(await engine.getSyncID(), null);
+ await engine.resetLocalSyncID();
+ Assert.notEqual(await engine.getSyncID(), null);
+
+ Assert.equal(await engine.getLastSync(), 0);
+  // lastSync is seconds on this side of the world, but milliseconds on the other.
+ await engine.setLastSync(1234.567);
+  // Should come back with 2-digit precision.
+ Assert.equal(await engine.getLastSync(), 1234.57);
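+  // Worked example of the assumed round-trip: 1234.567s becomes
+  // Math.round(1234.567 * 1000) = 1234567ms on the Rust side, and reading it
+  // back yields Math.round(1234567 / 10) / 100 = 1234.57s.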
+ await engine.setLastSync(0);
+
+ // Set some data.
+ await extensionStorageSync.set({ id: "ext-2" }, { ext_2_key: "ext_2_value" });
+  // Now do a sync with our regular test server.
+ let server = await serverForFoo(engine);
+ try {
+ await SyncTestingInfrastructure(server);
+
+ info("Add server records");
+ let foo = server.user("foo");
+ let collection = foo.collection("extension-storage");
+ let now = new_timestamp();
+
+ collection.insert(
+ "fakeguid0000",
+ encryptPayload({
+ id: "fakeguid0000",
+ extId: "ext-1",
+ data: JSON.stringify({ foo: "bar" }),
+ }),
+ now
+ );
+
+ info("Sync the engine");
+
+ let ping = await sync_engine_and_validate_telem(engine, false);
+ Assert.ok(ping.engines.find(e => e.name == "rust-webext-storage"));
+ Assert.equal(
+ ping.engines.find(e => e.name == "extension-storage"),
+ null
+ );
+
+ // We should have applied the data from the existing collection record.
+ Assert.deepEqual(await extensionStorageSync.get({ id: "ext-1" }, null), {
+ foo: "bar",
+ });
+
+    // There should now be 2 records on the server.
+ let payloads = collection.payloads();
+ Assert.equal(payloads.length, 2);
+ // find the new one we wrote.
+ let newPayload =
+ payloads[0].id == "fakeguid0000" ? payloads[1] : payloads[0];
+ Assert.equal(newPayload.data, `{"ext_2_key":"ext_2_value"}`);
+ // should have updated the timestamp.
+ greater(await engine.getLastSync(), 0, "Should update last sync time");
+ } finally {
+ await promiseStopServer(server);
+ await engine.finalize();
+ }
+});
diff --git a/services/sync/tests/unit/test_extension_storage_engine_kinto.js b/services/sync/tests/unit/test_extension_storage_engine_kinto.js
new file mode 100644
index 0000000000..b074fe376c
--- /dev/null
+++ b/services/sync/tests/unit/test_extension_storage_engine_kinto.js
@@ -0,0 +1,136 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+Services.prefs.setBoolPref("webextensions.storage.sync.kinto", true);
+
+const { ExtensionStorageEngineKinto: ExtensionStorageEngine } =
+ ChromeUtils.importESModule(
+ "resource://services-sync/engines/extension-storage.sys.mjs"
+ );
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { extensionStorageSyncKinto: extensionStorageSync } =
+ ChromeUtils.importESModule(
+ "resource://gre/modules/ExtensionStorageSyncKinto.sys.mjs"
+ );
+
+let engine;
+
+function mock(options) {
+ let calls = [];
+ let ret = function () {
+ calls.push(arguments);
+ return options.returns;
+ };
+ let proto = {
+ get calls() {
+ return calls;
+ },
+ };
+ Object.setPrototypeOf(proto, Function.prototype);
+ Object.setPrototypeOf(ret, proto);
+ return ret;
+}
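+
+// Usage sketch for `mock` (illustrative): the returned function records each
+// invocation's arguments and exposes them via `.calls`:
+//
+//   let fn = mock({ returns: 42 });
+//   fn("a", "b"); // -> 42
+//   equal(fn.calls.length, 1);
+//   equal(fn.calls[0][0], "a");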
+
+function setSkipChance(v) {
+ Services.prefs.setIntPref(
+ "services.sync.extension-storage.skipPercentageChance",
+ v
+ );
+}
+
+add_task(async function setup() {
+ await Service.engineManager.register(ExtensionStorageEngine);
+ engine = Service.engineManager.get("extension-storage");
+ do_get_profile(); // so we can use FxAccounts
+ loadWebExtensionTestFunctions();
+ setSkipChance(0);
+});
+
+add_task(async function test_calling_sync_calls__sync() {
+ let oldSync = ExtensionStorageEngine.prototype._sync;
+ let syncMock = (ExtensionStorageEngine.prototype._sync = mock({
+ returns: true,
+ }));
+ try {
+    // I wanted to call the main sync entry point for the entire
+    // package, but that tries to sync the ClientEngine first, which
+    // fails here.
+ await engine.sync();
+ } finally {
+ ExtensionStorageEngine.prototype._sync = oldSync;
+ }
+ equal(syncMock.calls.length, 1);
+});
+
+add_task(async function test_sync_skip() {
+ try {
+ // Do a few times to ensure we aren't getting "lucky" WRT Math.random()
+ for (let i = 0; i < 10; ++i) {
+ setSkipChance(100);
+ engine._tracker._score = 0;
+ ok(
+ !engine.shouldSkipSync("user"),
+ "Should allow explicitly requested syncs"
+ );
+ ok(!engine.shouldSkipSync("startup"), "Should allow startup syncs");
+ ok(
+ engine.shouldSkipSync("schedule"),
+ "Should skip scheduled syncs if skipProbability is 100"
+ );
+ engine._tracker._score = MULTI_DEVICE_THRESHOLD;
+ ok(
+ !engine.shouldSkipSync("schedule"),
+ "should allow scheduled syncs if tracker score is high"
+ );
+ engine._tracker._score = 0;
+ setSkipChance(0);
+ ok(
+ !engine.shouldSkipSync("schedule"),
+ "Should allow scheduled syncs if probability is 0"
+ );
+ }
+ } finally {
+ engine._tracker._score = 0;
+ setSkipChance(0);
+ }
+});
+
+add_task(async function test_calling_wipeClient_calls_clearAll() {
+ let oldClearAll = extensionStorageSync.clearAll;
+ let clearMock = (extensionStorageSync.clearAll = mock({
+ returns: Promise.resolve(),
+ }));
+ try {
+ await engine.wipeClient();
+ } finally {
+ extensionStorageSync.clearAll = oldClearAll;
+ }
+ equal(clearMock.calls.length, 1);
+});
+
+add_task(async function test_calling_sync_calls_ext_storage_sync() {
+ const extension = { id: "my-extension" };
+ let oldSync = extensionStorageSync.syncAll;
+ let syncMock = (extensionStorageSync.syncAll = mock({
+ returns: Promise.resolve(),
+ }));
+ try {
+ await withSyncContext(async function (context) {
+ // Set something so that everyone knows that we're using storage.sync
+ await extensionStorageSync.set(extension, { a: "b" }, context);
+ let ping = await sync_engine_and_validate_telem(engine, false);
+ Assert.ok(ping.engines.find(e => e.name == "extension-storage"));
+ Assert.equal(
+ ping.engines.find(e => e.name == "rust-webext-storage"),
+ null
+ );
+ });
+ } finally {
+ extensionStorageSync.syncAll = oldSync;
+ }
+ Assert.ok(syncMock.calls.length >= 1);
+});
diff --git a/services/sync/tests/unit/test_extension_storage_migration_telem.js b/services/sync/tests/unit/test_extension_storage_migration_telem.js
new file mode 100644
index 0000000000..a4b4c95f55
--- /dev/null
+++ b/services/sync/tests/unit/test_extension_storage_migration_telem.js
@@ -0,0 +1,81 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// Import the rust-based and kinto-based implementations. Not great to grab
+// these as they're somewhat private, but we want to run the pings through our
+// validation machinery which is here in the sync test code.
+const { extensionStorageSync: rustImpl } = ChromeUtils.importESModule(
+ "resource://gre/modules/ExtensionStorageSync.sys.mjs"
+);
+const { extensionStorageSyncKinto: kintoImpl } = ChromeUtils.importESModule(
+ "resource://gre/modules/ExtensionStorageSyncKinto.sys.mjs"
+);
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { ExtensionStorageEngineBridge } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/extension-storage.sys.mjs"
+);
+
+Services.prefs.setBoolPref("webextensions.storage.sync.kinto", false);
+Services.prefs.setStringPref("webextensions.storage.sync.log.level", "debug");
+
+// It's tricky to force error cases here (the databases are opened with
+// exclusive locks) and that part of the code has coverage in the vendored
+// application-services webext-storage crate. So this just tests that the
+// migration data ends up in the ping, and exactly once.
+add_task(async function test_sync_migration_telem() {
+ // Set some stuff using the kinto-based impl prior to fully setting up sync.
+ let e1 = { id: "test@mozilla.com" };
+ let c1 = { extension: e1, callOnClose() {} };
+
+ let e2 = { id: "test-2@mozilla.com" };
+ let c2 = { extension: e2, callOnClose() {} };
+ await kintoImpl.set(e1, { foo: "bar" }, c1);
+ await kintoImpl.set(e1, { baz: "quux" }, c1);
+ await kintoImpl.set(e2, { second: "2nd" }, c2);
+
+ Assert.deepEqual(await rustImpl.get(e1, "foo", c1), { foo: "bar" });
+ Assert.deepEqual(await rustImpl.get(e1, "baz", c1), { baz: "quux" });
+ Assert.deepEqual(await rustImpl.get(e2, null, c2), { second: "2nd" });
+
+  // Explicitly unregister first. It's quite possible this isn't needed for
+  // this case, but it's fairly harmless; we hope to uplift this patch to
+  // beta, and we previously hit beta-only problems caused by this (bug
+  // 1629116).
+ await Service.engineManager.unregister("extension-storage");
+ await Service.engineManager.register(ExtensionStorageEngineBridge);
+ let engine = Service.engineManager.get("extension-storage");
+ let server = await serverForFoo(engine, undefined);
+ try {
+ await SyncTestingInfrastructure(server);
+ await Service.engineManager.switchAlternatives();
+
+ _("First sync");
+ let ping = await sync_engine_and_validate_telem(engine, false, null, true);
+ Assert.deepEqual(ping.migrations, [
+ {
+ type: "webext-storage",
+ entries: 3,
+ entriesSuccessful: 3,
+ extensions: 2,
+ extensionsSuccessful: 2,
+ openFailure: false,
+ },
+ ]);
+
+ // force another sync
+ await engine.setLastSync(0);
+ _("Second sync");
+
+ ping = await sync_engine_and_validate_telem(engine, false, null, true);
+ Assert.deepEqual(ping.migrations, undefined);
+ } finally {
+ await kintoImpl.clear(e1, c1);
+ await kintoImpl.clear(e2, c2);
+ await rustImpl.clear(e1, c1);
+ await rustImpl.clear(e2, c2);
+ await promiseStopServer(server);
+ await engine.finalize();
+ }
+});
diff --git a/services/sync/tests/unit/test_extension_storage_tracker_kinto.js b/services/sync/tests/unit/test_extension_storage_tracker_kinto.js
new file mode 100644
index 0000000000..2de56ae400
--- /dev/null
+++ b/services/sync/tests/unit/test_extension_storage_tracker_kinto.js
@@ -0,0 +1,44 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+Services.prefs.setBoolPref("webextensions.storage.sync.kinto", true);
+
+const { ExtensionStorageEngine } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/extension-storage.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { extensionStorageSyncKinto: extensionStorageSync } =
+ ChromeUtils.importESModule(
+ "resource://gre/modules/ExtensionStorageSyncKinto.sys.mjs"
+ );
+
+let engine;
+
+add_task(async function setup() {
+ await Service.engineManager.register(ExtensionStorageEngine);
+ engine = Service.engineManager.get("extension-storage");
+ do_get_profile(); // so we can use FxAccounts
+ loadWebExtensionTestFunctions();
+});
+
+add_task(async function test_changing_extension_storage_changes_score() {
+ const tracker = engine._tracker;
+ const extension = { id: "my-extension-id" };
+ tracker.start();
+ await withSyncContext(async function (context) {
+ await extensionStorageSync.set(extension, { a: "b" }, context);
+ });
+ Assert.equal(tracker.score, SCORE_INCREMENT_MEDIUM);
+
+ tracker.resetScore();
+ await withSyncContext(async function (context) {
+ await extensionStorageSync.remove(extension, "a", context);
+ });
+ Assert.equal(tracker.score, SCORE_INCREMENT_MEDIUM);
+
+ await tracker.stop();
+});
diff --git a/services/sync/tests/unit/test_form_validator.js b/services/sync/tests/unit/test_form_validator.js
new file mode 100644
index 0000000000..58ea8b855b
--- /dev/null
+++ b/services/sync/tests/unit/test_form_validator.js
@@ -0,0 +1,86 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { FormValidator } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/forms.sys.mjs"
+);
+
+function getDummyServerAndClient() {
+ return {
+ server: [
+ {
+ id: "11111",
+ guid: "11111",
+ name: "foo",
+ fieldname: "foo",
+ value: "bar",
+ },
+ {
+ id: "22222",
+ guid: "22222",
+ name: "foo2",
+ fieldname: "foo2",
+ value: "bar2",
+ },
+ {
+ id: "33333",
+ guid: "33333",
+ name: "foo3",
+ fieldname: "foo3",
+ value: "bar3",
+ },
+ ],
+ client: [
+ {
+ id: "11111",
+ guid: "11111",
+ name: "foo",
+ fieldname: "foo",
+ value: "bar",
+ },
+ {
+ id: "22222",
+ guid: "22222",
+ name: "foo2",
+ fieldname: "foo2",
+ value: "bar2",
+ },
+ {
+ id: "33333",
+ guid: "33333",
+ name: "foo3",
+ fieldname: "foo3",
+ value: "bar3",
+ },
+ ],
+ };
+}
+
+add_task(async function test_valid() {
+ let { server, client } = getDummyServerAndClient();
+ let validator = new FormValidator();
+ let { problemData, clientRecords, records, deletedRecords } =
+ await validator.compareClientWithServer(client, server);
+ equal(clientRecords.length, 3);
+ equal(records.length, 3);
+ equal(deletedRecords.length, 0);
+ deepEqual(problemData, validator.emptyProblemData());
+});
+
+add_task(async function test_formValidatorIgnoresMissingClients() {
+  // Since form records, like history records, are not deleted from the
+  // server, the |FormValidator| shouldn't set the |missingClient| flag in
+  // |problemData|.
+ let { server, client } = getDummyServerAndClient();
+ client.pop();
+
+ let validator = new FormValidator();
+ let { problemData, clientRecords, records, deletedRecords } =
+ await validator.compareClientWithServer(client, server);
+
+ equal(clientRecords.length, 2);
+ equal(records.length, 3);
+ equal(deletedRecords.length, 0);
+
+ let expected = validator.emptyProblemData();
+ deepEqual(problemData, expected);
+});
diff --git a/services/sync/tests/unit/test_forms_store.js b/services/sync/tests/unit/test_forms_store.js
new file mode 100644
index 0000000000..716487865f
--- /dev/null
+++ b/services/sync/tests/unit/test_forms_store.js
@@ -0,0 +1,176 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+_(
+ "Make sure the form store follows the Store api and correctly accesses the backend form storage"
+);
+const { FormEngine } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/forms.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { SyncedRecordsTelemetry } = ChromeUtils.importESModule(
+ "resource://services-sync/telemetry.sys.mjs"
+);
+
+add_task(async function run_test() {
+ let engine = new FormEngine(Service);
+ await engine.initialize();
+ let store = engine._store;
+
+ async function applyEnsureNoFailures(records) {
+ let countTelemetry = new SyncedRecordsTelemetry();
+ Assert.equal(
+ (await store.applyIncomingBatch(records, countTelemetry)).length,
+ 0
+ );
+ }
+
+ _("Remove any existing entries");
+ await store.wipe();
+ if ((await store.getAllIDs()).length) {
+ do_throw("Shouldn't get any ids!");
+ }
+
+ _("Add a form entry");
+ await applyEnsureNoFailures([
+ {
+ id: Utils.makeGUID(),
+ name: "name!!",
+ value: "value??",
+ },
+ ]);
+
+ _("Should have 1 entry now");
+ let id = "";
+ for (let _id in await store.getAllIDs()) {
+ if (id == "") {
+ id = _id;
+ } else {
+ do_throw("Should have only gotten one!");
+ }
+ }
+ Assert.ok(store.itemExists(id));
+
+ _("Should be able to find this entry as a dupe");
+ Assert.equal(
+ await engine._findDupe({ name: "name!!", value: "value??" }),
+ id
+ );
+
+ let rec = await store.createRecord(id);
+ _("Got record for id", id, rec);
+ Assert.equal(rec.name, "name!!");
+ Assert.equal(rec.value, "value??");
+
+ _("Create a non-existent id for delete");
+ Assert.ok((await store.createRecord("deleted!!")).deleted);
+
+ _("Try updating.. doesn't do anything yet");
+ await store.update({});
+
+ _("Remove all entries");
+ await store.wipe();
+ if ((await store.getAllIDs()).length) {
+ do_throw("Shouldn't get any ids!");
+ }
+
+ _("Add another entry");
+ await applyEnsureNoFailures([
+ {
+ id: Utils.makeGUID(),
+ name: "another",
+ value: "entry",
+ },
+ ]);
+ id = "";
+ for (let _id in await store.getAllIDs()) {
+ if (id == "") {
+ id = _id;
+ } else {
+ do_throw("Should have only gotten one!");
+ }
+ }
+
+ _("Change the id of the new entry to something else");
+ await store.changeItemID(id, "newid");
+
+ _("Make sure it's there");
+ Assert.ok(store.itemExists("newid"));
+
+ _("Remove the entry");
+ await store.remove({
+ id: "newid",
+ });
+ if ((await store.getAllIDs()).length) {
+ do_throw("Shouldn't get any ids!");
+ }
+
+ _("Removing the entry again shouldn't matter");
+ await store.remove({
+ id: "newid",
+ });
+ if ((await store.getAllIDs()).length) {
+ do_throw("Shouldn't get any ids!");
+ }
+
+ _("Add another entry to delete using applyIncomingBatch");
+ let toDelete = {
+ id: Utils.makeGUID(),
+ name: "todelete",
+ value: "entry",
+ };
+ await applyEnsureNoFailures([toDelete]);
+ id = "";
+ for (let _id in await store.getAllIDs()) {
+ if (id == "") {
+ id = _id;
+ } else {
+ do_throw("Should have only gotten one!");
+ }
+ }
+ Assert.ok(store.itemExists(id));
+ // mark entry as deleted
+ toDelete.id = id;
+ toDelete.deleted = true;
+ await applyEnsureNoFailures([toDelete]);
+ if ((await store.getAllIDs()).length) {
+ do_throw("Shouldn't get any ids!");
+ }
+
+ _("Add an entry to wipe");
+ await applyEnsureNoFailures([
+ {
+ id: Utils.makeGUID(),
+ name: "towipe",
+ value: "entry",
+ },
+ ]);
+
+ await store.wipe();
+
+ if ((await store.getAllIDs()).length) {
+ do_throw("Shouldn't get any ids!");
+ }
+
+ _("Ensure we work if formfill is disabled.");
+ Services.prefs.setBoolPref("browser.formfill.enable", false);
+ try {
+ // a search
+ if ((await store.getAllIDs()).length) {
+ do_throw("Shouldn't get any ids!");
+ }
+ // an update.
+ await applyEnsureNoFailures([
+ {
+ id: Utils.makeGUID(),
+ name: "some",
+ value: "entry",
+ },
+ ]);
+ } finally {
+ Services.prefs.clearUserPref("browser.formfill.enable");
+ await store.wipe();
+ }
+});
diff --git a/services/sync/tests/unit/test_forms_tracker.js b/services/sync/tests/unit/test_forms_tracker.js
new file mode 100644
index 0000000000..aee74381ad
--- /dev/null
+++ b/services/sync/tests/unit/test_forms_tracker.js
@@ -0,0 +1,78 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { FormEngine } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/forms.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+add_task(async function run_test() {
+ _("Verify we've got an empty tracker to work with.");
+ let engine = new FormEngine(Service);
+ await engine.initialize();
+ let tracker = engine._tracker;
+
+ let changes = await tracker.getChangedIDs();
+ do_check_empty(changes);
+ Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+
+ async function addEntry(name, value) {
+ await engine._store.create({ name, value });
+ await engine._tracker.asyncObserver.promiseObserversComplete();
+ }
+ async function removeEntry(name, value) {
+ let guid = await engine._findDupe({ name, value });
+ await engine._store.remove({ id: guid });
+ await engine._tracker.asyncObserver.promiseObserversComplete();
+ }
+
+ try {
+ _("Create an entry. Won't show because we haven't started tracking yet");
+ await addEntry("name", "John Doe");
+ changes = await tracker.getChangedIDs();
+ do_check_empty(changes);
+
+ _("Tell the tracker to start tracking changes.");
+ tracker.start();
+ await removeEntry("name", "John Doe");
+ await addEntry("email", "john@doe.com");
+ changes = await tracker.getChangedIDs();
+ do_check_attribute_count(changes, 2);
+
+ _("Notifying twice won't do any harm.");
+ tracker.start();
+ await addEntry("address", "Memory Lane");
+ changes = await tracker.getChangedIDs();
+ do_check_attribute_count(changes, 3);
+
+ _("Check that ignoreAll is respected");
+ await tracker.clearChangedIDs();
+ tracker.score = 0;
+ tracker.ignoreAll = true;
+ await addEntry("username", "johndoe123");
+ await addEntry("favoritecolor", "green");
+ await removeEntry("name", "John Doe");
+ tracker.ignoreAll = false;
+ changes = await tracker.getChangedIDs();
+ do_check_empty(changes);
+ equal(tracker.score, 0);
+
+ _("Let's stop tracking again.");
+ await tracker.clearChangedIDs();
+ await tracker.stop();
+ await removeEntry("address", "Memory Lane");
+ changes = await tracker.getChangedIDs();
+ do_check_empty(changes);
+
+ _("Notifying twice won't do any harm.");
+ await tracker.stop();
+ await removeEntry("email", "john@doe.com");
+ changes = await tracker.getChangedIDs();
+ do_check_empty(changes);
+ } finally {
+ _("Clean up.");
+ await engine._store.wipe();
+ }
+});
diff --git a/services/sync/tests/unit/test_fxa_node_reassignment.js b/services/sync/tests/unit/test_fxa_node_reassignment.js
new file mode 100644
index 0000000000..0b25df0183
--- /dev/null
+++ b/services/sync/tests/unit/test_fxa_node_reassignment.js
@@ -0,0 +1,399 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+_("Test that node reassignment happens correctly using the FxA identity mgr.");
+// The node-reassignment logic is quite different for FxA than for the legacy
+// provider. In particular, there's no special request necessary for
+// reassignment - it comes from the token server - so we need to ensure the
+// Fxa cluster manager grabs a new token.
+
+const { RESTRequest } = ChromeUtils.importESModule(
+ "resource://services-common/rest.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { Status } = ChromeUtils.importESModule(
+ "resource://services-sync/status.sys.mjs"
+);
+const { SyncAuthManager } = ChromeUtils.importESModule(
+ "resource://services-sync/sync_auth.sys.mjs"
+);
+
+add_task(async function setup() {
+ // Disables all built-in engines. Important for avoiding errors thrown by the
+ // add-ons engine.
+ await Service.engineManager.clear();
+
+ // Setup the sync auth manager.
+ Status.__authManager = Service.identity = new SyncAuthManager();
+});
+
+// API-compatible with SyncServer handler. Bind `handler` to something to use
+// as a ServerCollection handler.
+function handleReassign(handler, req, resp) {
+ resp.setStatusLine(req.httpVersion, 401, "Node reassignment");
+ resp.setHeader("Content-Type", "application/json");
+ let reassignBody = JSON.stringify({ error: "401inator in place" });
+ resp.bodyOutputStream.write(reassignBody, reassignBody.length);
+}
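+
+// Usage sketch (mirrors the tests below): install the handler on a collection,
+// keeping the old one around so it can be restored after the 401 is provoked:
+//
+//   let oldHandler = rotary.collectionHandler;
+//   rotary.collectionHandler = handleReassign.bind(this, undefined);
+//   // ... later, undo:
+//   rotary.collectionHandler = oldHandler;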
+
+var numTokenRequests = 0;
+
+function prepareServer(cbAfterTokenFetch) {
+ syncTestLogging();
+ let config = makeIdentityConfig({ username: "johndoe" });
+ // A server callback to ensure we don't accidentally hit the wrong endpoint
+ // after a node reassignment.
+ let callback = {
+ onRequest(req, resp) {
+ let full = `${req.scheme}://${req.host}:${req.port}${req.path}`;
+ let expected = config.fxaccount.token.endpoint;
+ Assert.ok(
+ full.startsWith(expected),
+ `request made to ${full}, expected ${expected}`
+ );
+ },
+ };
+ Object.setPrototypeOf(callback, SyncServerCallback);
+ let server = new SyncServer(callback);
+ server.registerUser("johndoe");
+ server.start();
+
+ // Set the token endpoint for the initial token request that's done implicitly
+ // via configureIdentity.
+ config.fxaccount.token.endpoint = server.baseURI + "1.1/johndoe/";
+ // And future token fetches will do magic around numReassigns.
+ let numReassigns = 0;
+ return configureIdentity(config).then(() => {
+ Service.identity._tokenServerClient = {
+ getTokenUsingOAuth() {
+ return new Promise(res => {
+ // Build a new URL with trailing zeros for the SYNC_VERSION part - this
+ // will still be seen as equivalent by the test server, but different
+ // by sync itself.
+ numReassigns += 1;
+ let trailingZeros = new Array(numReassigns + 1).join("0");
+ let token = config.fxaccount.token;
+ token.endpoint = server.baseURI + "1.1" + trailingZeros + "/johndoe";
+ token.uid = config.username;
+ _(`test server saw token fetch - endpoint now ${token.endpoint}`);
+ numTokenRequests += 1;
+ res(token);
+ if (cbAfterTokenFetch) {
+ cbAfterTokenFetch();
+ }
+ });
+ },
+ };
+ return server;
+ });
+}
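+
+// Worked example of the endpoint trick above (illustrative): on the first
+// reassignment numReassigns is 1, so trailingZeros is "0" and the token
+// endpoint becomes `${server.baseURI}1.10/johndoe`; on the second it is "00",
+// giving `1.100/johndoe`, and so on - each a different URL to sync, but
+// equivalent as far as the test server is concerned.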
+
+function getReassigned() {
+ try {
+ return Services.prefs.getBoolPref("services.sync.lastSyncReassigned");
+ } catch (ex) {
+ if (ex.result != Cr.NS_ERROR_UNEXPECTED) {
+ do_throw(
+ "Got exception retrieving lastSyncReassigned: " + Log.exceptionStr(ex)
+ );
+ }
+ }
+ return false;
+}
+
+/**
+ * Make a test request to `url`, then watch the result of two syncs
+ * to ensure that a new token (i.e. a node reassignment) was requested.
+ * Runs `between` between the two. This can be used to undo deliberate failure
+ * setup, detach observers, etc.
+ */
+async function syncAndExpectNodeReassignment(
+ server,
+ firstNotification,
+ between,
+ secondNotification,
+ url
+) {
+ _("Starting syncAndExpectNodeReassignment\n");
+ let deferred = Promise.withResolvers();
+ async function onwards() {
+ let numTokenRequestsBefore;
+ function onFirstSync() {
+ _("First sync completed.");
+ Svc.Obs.remove(firstNotification, onFirstSync);
+ Svc.Obs.add(secondNotification, onSecondSync);
+
+ Assert.equal(Service.clusterURL, "");
+
+ // Track whether we fetched a new token.
+ numTokenRequestsBefore = numTokenRequests;
+
+ // Allow for tests to clean up error conditions.
+ between();
+ }
+ function onSecondSync() {
+ _("Second sync completed.");
+ Svc.Obs.remove(secondNotification, onSecondSync);
+ Service.scheduler.clearSyncTriggers();
+
+ // Make absolutely sure that any event listeners are done with their work
+ // before we proceed.
+ waitForZeroTimer(function () {
+ _("Second sync nextTick.");
+ Assert.equal(
+ numTokenRequests,
+ numTokenRequestsBefore + 1,
+ "fetched a new token"
+ );
+ Service.startOver().then(() => {
+ server.stop(deferred.resolve);
+ });
+ });
+ }
+
+ Svc.Obs.add(firstNotification, onFirstSync);
+ await Service.sync();
+ }
+
+ // Make sure that we really do get a 401 (but we can only do that if we are
+ // already logged in, as the login process is what sets up the URLs)
+ if (Service.isLoggedIn) {
+ _("Making request to " + url + " which should 401");
+ let request = new RESTRequest(url);
+ await request.get();
+ Assert.equal(request.response.status, 401);
+ CommonUtils.nextTick(onwards);
+ } else {
+ _("Skipping preliminary validation check for a 401 as we aren't logged in");
+ CommonUtils.nextTick(onwards);
+ }
+ await deferred.promise;
+}
+
+// Check that when we sync we don't request a new token by default - our
+// test setup has configured the client with a valid token, and that token
+// should be used to form the cluster URL.
+add_task(async function test_single_token_fetch() {
+ enableValidationPrefs();
+
+ _("Test a normal sync only fetches 1 token");
+
+ let numTokenFetches = 0;
+
+ function afterTokenFetch() {
+ numTokenFetches++;
+ }
+
+ // Set the cluster URL to an "old" version - this is to ensure we don't
+ // use that old cached version for the first sync but prefer the value
+ // we got from the token (and as above, we are also checking we don't grab
+ // a new token). If the test actually attempts to connect to this URL
+ // it will crash.
+ Service.clusterURL = "http://example.com/";
+
+ let server = await prepareServer(afterTokenFetch);
+
+ Assert.ok(!Service.isLoggedIn, "not already logged in");
+ await Service.sync();
+ Assert.equal(Status.sync, SYNC_SUCCEEDED, "sync succeeded");
+ Assert.equal(numTokenFetches, 0, "didn't fetch a new token");
+  // A bit hacky, but given we know how prepareServer works, we can deduce
+  // the clusterURL we expect.
+ let expectedClusterURL = server.baseURI + "1.1/johndoe/";
+ Assert.equal(Service.clusterURL, expectedClusterURL);
+ await Service.startOver();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_momentary_401_engine() {
+ enableValidationPrefs();
+
+ _("Test a failure for engine URLs that's resolved by reassignment.");
+ let server = await prepareServer();
+ let john = server.user("johndoe");
+
+ _("Enabling the Rotary engine.");
+ let { engine, syncID, tracker } = await registerRotaryEngine();
+
+ // We need the server to be correctly set up prior to experimenting. Do this
+ // through a sync.
+ let global = {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ rotary: { version: engine.version, syncID },
+ };
+ john.createCollection("meta").insert("global", global);
+
+ _("First sync to prepare server contents.");
+ await Service.sync();
+
+ _("Setting up Rotary collection to 401.");
+ let rotary = john.createCollection("rotary");
+ let oldHandler = rotary.collectionHandler;
+ rotary.collectionHandler = handleReassign.bind(this, undefined);
+
+ // We want to verify that the clusterURL pref has been cleared after a 401
+ // inside a sync. Flag the Rotary engine to need syncing.
+ john.collection("rotary").timestamp += 1000;
+
+ function between() {
+ _("Undoing test changes.");
+ rotary.collectionHandler = oldHandler;
+
+ function onLoginStart() {
+ // lastSyncReassigned shouldn't be cleared until a sync has succeeded.
+ _("Ensuring that lastSyncReassigned is still set at next sync start.");
+ Svc.Obs.remove("weave:service:login:start", onLoginStart);
+ Assert.ok(getReassigned());
+ }
+
+ _("Adding observer that lastSyncReassigned is still set on login.");
+ Svc.Obs.add("weave:service:login:start", onLoginStart);
+ }
+
+ await syncAndExpectNodeReassignment(
+ server,
+ "weave:service:sync:finish",
+ between,
+ "weave:service:sync:finish",
+ Service.storageURL + "rotary"
+ );
+
+ await tracker.clearChangedIDs();
+ await Service.engineManager.unregister(engine);
+});
+
+// This test ends up being a failing info fetch *after we're already logged in*.
+add_task(async function test_momentary_401_info_collections_loggedin() {
+ enableValidationPrefs();
+
+ _(
+ "Test a failure for info/collections after login that's resolved by reassignment."
+ );
+ let server = await prepareServer();
+
+ _("First sync to prepare server contents.");
+ await Service.sync();
+
+ _("Arrange for info/collections to return a 401.");
+ let oldHandler = server.toplevelHandlers.info;
+ server.toplevelHandlers.info = handleReassign;
+
+ function undo() {
+ _("Undoing test changes.");
+ server.toplevelHandlers.info = oldHandler;
+ }
+
+ Assert.ok(Service.isLoggedIn, "already logged in");
+
+ await syncAndExpectNodeReassignment(
+ server,
+ "weave:service:sync:error",
+ undo,
+ "weave:service:sync:finish",
+ Service.infoURL
+ );
+});
+
+// This test ends up being a failing info fetch *before we're logged in*.
+// In this case we expect to recover during the login phase - so the first
+// sync succeeds.
+add_task(async function test_momentary_401_info_collections_loggedout() {
+ enableValidationPrefs();
+
+ _(
+ "Test a failure for info/collections before login that's resolved by reassignment."
+ );
+
+ let oldHandler;
+ let sawTokenFetch = false;
+
+ function afterTokenFetch() {
+ // After a single token fetch, we undo our evil handleReassign hack, so
+ // the next /info request returns the collection instead of a 401
+ server.toplevelHandlers.info = oldHandler;
+ sawTokenFetch = true;
+ }
+
+ let server = await prepareServer(afterTokenFetch);
+
+ // Return a 401 for the next /info request - it will be reset immediately
+ // after a new token is fetched.
+ oldHandler = server.toplevelHandlers.info;
+ server.toplevelHandlers.info = handleReassign;
+
+ Assert.ok(!Service.isLoggedIn, "not already logged in");
+
+ await Service.sync();
+ Assert.equal(Status.sync, SYNC_SUCCEEDED, "sync succeeded");
+ // sync was successful - check we grabbed a new token.
+ Assert.ok(sawTokenFetch, "a new token was fetched by this test.");
+ // and we are done.
+ await Service.startOver();
+ await promiseStopServer(server);
+});
+
+// This test ends up being a failing meta/global fetch *after we're already logged in*.
+add_task(async function test_momentary_401_storage_loggedin() {
+ enableValidationPrefs();
+
+ _(
+ "Test a failure for any storage URL after login that's resolved by" +
+ "reassignment."
+ );
+ let server = await prepareServer();
+
+ _("First sync to prepare server contents.");
+ await Service.sync();
+
+ _("Arrange for meta/global to return a 401.");
+ let oldHandler = server.toplevelHandlers.storage;
+ server.toplevelHandlers.storage = handleReassign;
+
+ function undo() {
+ _("Undoing test changes.");
+ server.toplevelHandlers.storage = oldHandler;
+ }
+
+ Assert.ok(Service.isLoggedIn, "already logged in");
+
+ await syncAndExpectNodeReassignment(
+ server,
+ "weave:service:sync:error",
+ undo,
+ "weave:service:sync:finish",
+ Service.storageURL + "meta/global"
+ );
+});
+
+// This test ends up being a failing meta/global fetch *before we've logged in*.
+add_task(async function test_momentary_401_storage_loggedout() {
+ enableValidationPrefs();
+
+ _(
+ "Test a failure for any storage URL before login, not just engine parts. " +
+ "Resolved by reassignment."
+ );
+ let server = await prepareServer();
+
+ // Return a 401 for all storage requests.
+ let oldHandler = server.toplevelHandlers.storage;
+ server.toplevelHandlers.storage = handleReassign;
+
+ function undo() {
+ _("Undoing test changes.");
+ server.toplevelHandlers.storage = oldHandler;
+ }
+
+ Assert.ok(!Service.isLoggedIn, "already logged in");
+
+ await syncAndExpectNodeReassignment(
+ server,
+ "weave:service:login:error",
+ undo,
+ "weave:service:sync:finish",
+ Service.storageURL + "meta/global"
+ );
+});
diff --git a/services/sync/tests/unit/test_fxa_service_cluster.js b/services/sync/tests/unit/test_fxa_service_cluster.js
new file mode 100644
index 0000000000..0203d01ef5
--- /dev/null
+++ b/services/sync/tests/unit/test_fxa_service_cluster.js
@@ -0,0 +1,58 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { initializeIdentityWithTokenServerResponse } =
+ ChromeUtils.importESModule(
+ "resource://testing-common/services/sync/fxa_utils.sys.mjs"
+ );
+
+add_task(async function test_findCluster() {
+ _("Test FxA _findCluster()");
+
+ _("_findCluster() throws on 500 errors.");
+ initializeIdentityWithTokenServerResponse({
+ status: 500,
+ headers: [],
+ body: "",
+ });
+
+ await Assert.rejects(
+ Service.identity._findCluster(),
+ /TokenServerClientServerError/
+ );
+
+ _("_findCluster() returns null on authentication errors.");
+ initializeIdentityWithTokenServerResponse({
+ status: 401,
+ headers: { "content-type": "application/json" },
+ body: "{}",
+ });
+
+ let cluster = await Service.identity._findCluster();
+ Assert.strictEqual(cluster, null);
+
+ _("_findCluster() works with correct tokenserver response.");
+ let endpoint = "http://example.com/something";
+ initializeIdentityWithTokenServerResponse({
+ status: 200,
+ headers: { "content-type": "application/json" },
+ body: JSON.stringify({
+ api_endpoint: endpoint,
+ duration: 300,
+ id: "id",
+ key: "key",
+ uid: "uid",
+ }),
+ });
+
+ cluster = await Service.identity._findCluster();
+ // The cluster manager ensures a trailing "/"
+ Assert.strictEqual(cluster, endpoint + "/");
+
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+});
diff --git a/services/sync/tests/unit/test_history_engine.js b/services/sync/tests/unit/test_history_engine.js
new file mode 100644
index 0000000000..9cca379b0b
--- /dev/null
+++ b/services/sync/tests/unit/test_history_engine.js
@@ -0,0 +1,429 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { HistoryEngine } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/history.sys.mjs"
+);
+
+// Use only for rawAddVisit.
+XPCOMUtils.defineLazyServiceGetter(
+ this,
+ "asyncHistory",
+ "@mozilla.org/browser/history;1",
+ "mozIAsyncHistory"
+);
+async function rawAddVisit(id, uri, visitPRTime, transitionType) {
+ return new Promise((resolve, reject) => {
+ let results = [];
+ let handler = {
+ handleResult(result) {
+ results.push(result);
+ },
+ handleError(resultCode, placeInfo) {
+ do_throw(`updatePlaces gave error ${resultCode}!`);
+ },
+ handleCompletion(count) {
+ resolve({ results, count });
+ },
+ };
+ asyncHistory.updatePlaces(
+ [
+ {
+ guid: id,
+ uri: typeof uri == "string" ? CommonUtils.makeURI(uri) : uri,
+ visits: [{ visitDate: visitPRTime, transitionType }],
+ },
+ ],
+ handler
+ );
+ });
+}
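+
+// Usage sketch (illustrative): `visitPRTime` is PRTime, i.e. microseconds
+// since the epoch, so a JS millisecond timestamp must be scaled by 1000:
+//
+//   let prTime = Date.now() * 1000; // ms -> microseconds
+//   await rawAddVisit(
+//     "aaaaaaaaaaaa",
+//     "https://example.com/",
+//     prTime,
+//     PlacesUtils.history.TRANSITIONS.LINK
+//   );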
+
+add_task(async function test_history_download_limit() {
+ let engine = new HistoryEngine(Service);
+ await engine.initialize();
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let lastSync = new_timestamp();
+
+ let collection = server.user("foo").collection("history");
+ for (let i = 0; i < 15; i++) {
+ let id = "place" + i.toString(10).padStart(7, "0");
+ let wbo = new ServerWBO(
+ id,
+ encryptPayload({
+ id,
+ histUri: "http://example.com/" + i,
+ title: "Page " + i,
+ visits: [
+ {
+ date: Date.now() * 1000,
+ type: PlacesUtils.history.TRANSITIONS.TYPED,
+ },
+ {
+ date: Date.now() * 1000,
+ type: PlacesUtils.history.TRANSITIONS.LINK,
+ },
+ ],
+ }),
+ lastSync + 1 + i
+ );
+ wbo.sortindex = 15 - i;
+ collection.insertWBO(wbo);
+ }
+
+  // We have 15 records on the server since the last sync, but our download
+  // limit is 5 records per sync. We should eventually fetch all 15.
+ await engine.setLastSync(lastSync);
+ engine.downloadBatchSize = 4;
+ engine.downloadLimit = 5;
+
+ // Don't actually fetch any backlogged records, so that we can inspect
+ // the backlog between syncs.
+ engine.guidFetchBatchSize = 0;
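+  // Under these settings the first sync is expected to download at most
+  // `downloadLimit` (5) records, fetched in `downloadBatchSize` (4) chunks
+  // (i.e. 4 + 1), and backlog the remaining 10 GUIDs in `engine.toFetch`.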
+
+ let ping = await sync_engine_and_validate_telem(engine, false);
+ deepEqual(ping.engines[0].incoming, { applied: 5 });
+
+ let backlogAfterFirstSync = Array.from(engine.toFetch).sort();
+ deepEqual(backlogAfterFirstSync, [
+ "place0000000",
+ "place0000001",
+ "place0000002",
+ "place0000003",
+ "place0000004",
+ "place0000005",
+ "place0000006",
+ "place0000007",
+ "place0000008",
+ "place0000009",
+ ]);
+
+ // We should have fast-forwarded the last sync time.
+ equal(await engine.getLastSync(), lastSync + 15);
+
+ engine.lastModified = collection.modified;
+ ping = await sync_engine_and_validate_telem(engine, false);
+ ok(!ping.engines[0].incoming);
+
+ // After the second sync, our backlog still contains the same GUIDs: we
+ // weren't able to make progress on fetching them, since our
+ // `guidFetchBatchSize` is 0.
+ let backlogAfterSecondSync = Array.from(engine.toFetch).sort();
+ deepEqual(backlogAfterFirstSync, backlogAfterSecondSync);
+
+ // Now add a newer record to the server.
+ let newWBO = new ServerWBO(
+ "placeAAAAAAA",
+ encryptPayload({
+ id: "placeAAAAAAA",
+ histUri: "http://example.com/a",
+ title: "New Page A",
+ visits: [
+ {
+ date: Date.now() * 1000,
+ type: PlacesUtils.history.TRANSITIONS.TYPED,
+ },
+ ],
+ }),
+ lastSync + 20
+ );
+ newWBO.sortindex = -1;
+ collection.insertWBO(newWBO);
+
+ engine.lastModified = collection.modified;
+ ping = await sync_engine_and_validate_telem(engine, false);
+ deepEqual(ping.engines[0].incoming, { applied: 1 });
+
+ // Our backlog should remain the same.
+ let backlogAfterThirdSync = Array.from(engine.toFetch).sort();
+ deepEqual(backlogAfterSecondSync, backlogAfterThirdSync);
+
+ equal(await engine.getLastSync(), lastSync + 20);
+
+ // Bump the fetch batch size to let the backlog make progress. We should
+ // make 3 requests to fetch 5 backlogged GUIDs.
+ engine.guidFetchBatchSize = 2;
+
+ engine.lastModified = collection.modified;
+ ping = await sync_engine_and_validate_telem(engine, false);
+ deepEqual(ping.engines[0].incoming, { applied: 5 });
+
+ deepEqual(Array.from(engine.toFetch).sort(), [
+ "place0000005",
+ "place0000006",
+ "place0000007",
+ "place0000008",
+ "place0000009",
+ ]);
+
+ // Sync again to clear out the backlog.
+ engine.lastModified = collection.modified;
+ ping = await sync_engine_and_validate_telem(engine, false);
+ deepEqual(ping.engines[0].incoming, { applied: 5 });
+
+ deepEqual(Array.from(engine.toFetch), []);
+
+ await engine.wipeClient();
+ await engine.finalize();
+});
+
+add_task(async function test_history_visit_roundtrip() {
+ let engine = new HistoryEngine(Service);
+ await engine.initialize();
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ engine._tracker.start();
+
+ let id = "aaaaaaaaaaaa";
+ let oneHourMS = 60 * 60 * 1000;
+  // Insert a visit with a non-round microsecond timestamp (i.e. one that isn't
+  // evenly divisible by 1000). This will typically be the case for visits that
+  // occur during normal navigation.
+ let time = (Date.now() - oneHourMS) * 1000 + 555;
+  // We use the low-level history API since it lets us provide microseconds.
+ let { count } = await rawAddVisit(
+ id,
+ "https://www.example.com",
+ time,
+ PlacesUtils.history.TRANSITIONS.TYPED
+ );
+ equal(count, 1);
+ // Check that it was inserted and that we didn't round on the insert.
+ let visits = await PlacesSyncUtils.history.fetchVisitsForURL(
+ "https://www.example.com"
+ );
+ equal(visits.length, 1);
+ equal(visits[0].date, time);
+
+ let collection = server.user("foo").collection("history");
+
+ // Sync the visit up to the server.
+ await sync_engine_and_validate_telem(engine, false);
+
+ collection.updateRecord(
+ id,
+ cleartext => {
+ // Double-check that we didn't round the visit's timestamp to the nearest
+ // millisecond when uploading.
+ equal(cleartext.visits[0].date, time);
+ // Add a remote visit so that we get past the deepEquals check in reconcile
+ // (otherwise the history engine will skip applying this record). The
+ // contents of this visit don't matter, beyond the fact that it needs to
+ // exist.
+ cleartext.visits.push({
+ date: (Date.now() - oneHourMS / 2) * 1000,
+ type: PlacesUtils.history.TRANSITIONS.LINK,
+ });
+ },
+ new_timestamp() + 10
+ );
+
+ // Force a remote sync.
+ await engine.setLastSync(new_timestamp() - 30);
+ await sync_engine_and_validate_telem(engine, false);
+
+ // Make sure that we didn't duplicate the visit when inserting. (Prior to bug
+ // 1423395, we would insert a duplicate visit, where the timestamp was
+ // effectively `Math.round(microsecondTimestamp / 1000) * 1000`.)
+ visits = await PlacesSyncUtils.history.fetchVisitsForURL(
+ "https://www.example.com"
+ );
+ equal(visits.length, 2);
+
+ await engine.wipeClient();
+ await engine.finalize();
+});
+
+add_task(async function test_history_visit_dedupe_old() {
+ let engine = new HistoryEngine(Service);
+ await engine.initialize();
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let initialVisits = Array.from({ length: 25 }, (_, index) => ({
+ transition: PlacesUtils.history.TRANSITION_LINK,
+ date: new Date(Date.UTC(2017, 10, 1 + index)),
+ }));
+ initialVisits.push({
+ transition: PlacesUtils.history.TRANSITION_LINK,
+ date: new Date(),
+ });
+ await PlacesUtils.history.insert({
+ url: "https://www.example.com",
+ visits: initialVisits,
+ });
+
+ let recentVisits = await PlacesSyncUtils.history.fetchVisitsForURL(
+ "https://www.example.com"
+ );
+ equal(recentVisits.length, 20);
+ let { visits: allVisits, guid } = await PlacesUtils.history.fetch(
+ "https://www.example.com",
+ {
+ includeVisits: true,
+ }
+ );
+ equal(allVisits.length, 26);
+
+ let collection = server.user("foo").collection("history");
+
+ await sync_engine_and_validate_telem(engine, false);
+
+ collection.updateRecord(
+ guid,
+ data => {
+ data.visits.push(
+        // Add a couple of remote visits equivalent to old visits we already have.
+ {
+ date: Date.UTC(2017, 10, 1) * 1000, // Nov 1, 2017
+ type: PlacesUtils.history.TRANSITIONS.LINK,
+ },
+ {
+ date: Date.UTC(2017, 10, 2) * 1000, // Nov 2, 2017
+ type: PlacesUtils.history.TRANSITIONS.LINK,
+ },
+ // Add a couple new visits to make sure we are still applying them.
+ {
+ date: Date.UTC(2017, 11, 4) * 1000, // Dec 4, 2017
+ type: PlacesUtils.history.TRANSITIONS.LINK,
+ },
+ {
+ date: Date.UTC(2017, 11, 5) * 1000, // Dec 5, 2017
+ type: PlacesUtils.history.TRANSITIONS.LINK,
+ }
+ );
+ },
+ new_timestamp() + 10
+ );
+
+ await engine.setLastSync(new_timestamp() - 30);
+ await sync_engine_and_validate_telem(engine, false);
+
+ allVisits = (
+ await PlacesUtils.history.fetch("https://www.example.com", {
+ includeVisits: true,
+ })
+ ).visits;
+
+ equal(allVisits.length, 28);
+ ok(
+ allVisits.find(x => x.date.getTime() === Date.UTC(2017, 11, 4)),
+ "Should contain the Dec. 4th visit"
+ );
+ ok(
+ allVisits.find(x => x.date.getTime() === Date.UTC(2017, 11, 5)),
+ "Should contain the Dec. 5th visit"
+ );
+
+ await engine.wipeClient();
+ await engine.finalize();
+});
+
+add_task(async function test_history_unknown_fields() {
+ let engine = new HistoryEngine(Service);
+ await engine.initialize();
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ engine._tracker.start();
+
+ let id = "aaaaaaaaaaaa";
+ let oneHourMS = 60 * 60 * 1000;
+  // Insert a visit with a non-round microsecond timestamp (i.e. one that isn't
+  // evenly divisible by 1000). This will typically be the case for visits that
+  // occur during normal navigation.
+ let time = (Date.now() - oneHourMS) * 1000 + 555;
+  // We use the low-level history API since it lets us provide microseconds.
+ let { count } = await rawAddVisit(
+ id,
+ "https://www.example.com",
+ time,
+ PlacesUtils.history.TRANSITIONS.TYPED
+ );
+ equal(count, 1);
+
+ let collection = server.user("foo").collection("history");
+
+ // Sync the visit up to the server.
+ await sync_engine_and_validate_telem(engine, false);
+
+ collection.updateRecord(
+ id,
+ cleartext => {
+ equal(cleartext.visits[0].date, time);
+
+ // Add unknown fields to an instance of a visit
+ cleartext.visits.push({
+ date: (Date.now() - oneHourMS / 2) * 1000,
+ type: PlacesUtils.history.TRANSITIONS.LINK,
+ unknownVisitField: "an unknown field could show up in a visit!",
+ });
+ cleartext.title = "A page title";
+ // Add unknown fields to the payload for this URL
+ cleartext.unknownStrField = "an unknown str field";
+ cleartext.unknownObjField = { newField: "a field within an object" };
+ },
+ new_timestamp() + 10
+ );
+
+ // Force a remote sync.
+ await engine.setLastSync(new_timestamp() - 30);
+ await sync_engine_and_validate_telem(engine, false);
+
+ // Add a new visit to ensure we're actually putting things back on the server
+ let newTime = (Date.now() - oneHourMS) * 1000 + 555;
+ await rawAddVisit(
+ id,
+ "https://www.example.com",
+ newTime,
+ PlacesUtils.history.TRANSITIONS.LINK
+ );
+
+ // Sync again
+ await engine.setLastSync(new_timestamp() - 30);
+ await sync_engine_and_validate_telem(engine, false);
+
+ let placeInfo = await PlacesSyncUtils.history.fetchURLInfoForGuid(id);
+
+ // Found the place we're looking for
+ Assert.equal(placeInfo.title, "A page title");
+ Assert.equal(placeInfo.url, "https://www.example.com/");
+
+ // It correctly returns any unknownFields that might've been
+ // stored in the moz_places_extra table
+ deepEqual(JSON.parse(placeInfo.unknownFields), {
+ unknownStrField: "an unknown str field",
+ unknownObjField: { newField: "a field within an object" },
+ });
+
+  // Getting visits via PlacesSyncUtils will also return unknownFields
+  // via the moz_historyvisits_extra table.
+ let visits = await PlacesSyncUtils.history.fetchVisitsForURL(
+ "https://www.example.com"
+ );
+ equal(visits.length, 3);
+
+  // fetchVisitsForURL is a sync method that gets called during upload,
+  // so the unknown field should already be at the top level.
+ deepEqual(
+ visits[0].unknownVisitField,
+ "an unknown field could show up in a visit!"
+ );
+
+ // Remote history record should have the fields back at the top level
+ let remotePlace = collection.payloads().find(rec => rec.id === id);
+ deepEqual(remotePlace.unknownStrField, "an unknown str field");
+ deepEqual(remotePlace.unknownObjField, {
+ newField: "a field within an object",
+ });
+
+ await engine.wipeClient();
+ await engine.finalize();
+});
diff --git a/services/sync/tests/unit/test_history_store.js b/services/sync/tests/unit/test_history_store.js
new file mode 100644
index 0000000000..07aee0dd01
--- /dev/null
+++ b/services/sync/tests/unit/test_history_store.js
@@ -0,0 +1,570 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { HistoryEngine } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/history.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { SyncedRecordsTelemetry } = ChromeUtils.importESModule(
+ "resource://services-sync/telemetry.sys.mjs"
+);
+
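+// Places stores visit dates in microseconds; Date.now() is in milliseconds,
+// hence the * 1000 below.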
+const TIMESTAMP1 = (Date.now() - 103406528) * 1000;
+const TIMESTAMP2 = (Date.now() - 6592903) * 1000;
+const TIMESTAMP3 = (Date.now() - 123894) * 1000;
+
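+// Resolves once Places fires its "page-visited" notification, letting tests
+// wait until applied records have actually landed in the database.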
+function promiseOnVisitObserved() {
+ return new Promise(res => {
+ let listener = new PlacesWeakCallbackWrapper(events => {
+ PlacesObservers.removeListener(["page-visited"], listener);
+ res();
+ });
+ PlacesObservers.addListener(["page-visited"], listener);
+ });
+}
+
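+// True when `actual` falls within `skewMillis` of `expected`; allows for the
+// clock moving while the test runs.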
+function isDateApproximately(actual, expected, skewMillis = 1000) {
+ let lowerBound = expected - skewMillis;
+ let upperBound = expected + skewMillis;
+ return actual >= lowerBound && actual <= upperBound;
+}
+
+let engine, store, fxuri, fxguid, tburi, tbguid;
+
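+// Applies a batch of incoming records and asserts that none of them failed.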
+async function applyEnsureNoFailures(records) {
+ let countTelemetry = new SyncedRecordsTelemetry();
+ Assert.equal(
+ (await store.applyIncomingBatch(records, countTelemetry)).length,
+ 0
+ );
+}
+
+add_task(async function setup() {
+ engine = new HistoryEngine(Service);
+ await engine.initialize();
+ store = engine._store;
+});
+
+add_task(async function test_store() {
+ _("Verify that we've got an empty store to work with.");
+ do_check_empty(await store.getAllIDs());
+
+ _("Let's create an entry in the database.");
+ fxuri = CommonUtils.makeURI("http://getfirefox.com/");
+
+ await PlacesTestUtils.addVisits({
+ uri: fxuri,
+ title: "Get Firefox!",
+ visitDate: TIMESTAMP1,
+ });
+ _("Verify that the entry exists.");
+ let ids = Object.keys(await store.getAllIDs());
+ Assert.equal(ids.length, 1);
+ fxguid = ids[0];
+ Assert.ok(await store.itemExists(fxguid));
+
+ _("If we query a non-existent record, it's marked as deleted.");
+ let record = await store.createRecord("non-existent");
+ Assert.ok(record.deleted);
+
+ _("Verify createRecord() returns a complete record.");
+ record = await store.createRecord(fxguid);
+ Assert.equal(record.histUri, fxuri.spec);
+ Assert.equal(record.title, "Get Firefox!");
+ Assert.equal(record.visits.length, 1);
+ Assert.equal(record.visits[0].date, TIMESTAMP1);
+ Assert.equal(record.visits[0].type, Ci.nsINavHistoryService.TRANSITION_LINK);
+
+ _("Let's modify the record and have the store update the database.");
+ let secondvisit = {
+ date: TIMESTAMP2,
+ type: Ci.nsINavHistoryService.TRANSITION_TYPED,
+ };
+ let onVisitObserved = promiseOnVisitObserved();
+ let updatedRec = await store.createRecord(fxguid);
+ updatedRec.cleartext.title = "Hol Dir Firefox!";
+ updatedRec.cleartext.visits.push(secondvisit);
+ await applyEnsureNoFailures([updatedRec]);
+ await onVisitObserved;
+ let queryres = await PlacesUtils.history.fetch(fxuri.spec, {
+ includeVisits: true,
+ });
+ Assert.equal(queryres.title, "Hol Dir Firefox!");
+ Assert.deepEqual(queryres.visits, [
+ {
+ date: new Date(TIMESTAMP2 / 1000),
+ transition: Ci.nsINavHistoryService.TRANSITION_TYPED,
+ },
+ {
+ date: new Date(TIMESTAMP1 / 1000),
+ transition: Ci.nsINavHistoryService.TRANSITION_LINK,
+ },
+ ]);
+ await PlacesUtils.history.clear();
+});
+
+add_task(async function test_store_create() {
+ _("Create a brand new record through the store.");
+ tbguid = Utils.makeGUID();
+ tburi = CommonUtils.makeURI("http://getthunderbird.com");
+ let onVisitObserved = promiseOnVisitObserved();
+ let record = await store.createRecord(tbguid);
+ record.cleartext = {
+ id: tbguid,
+ histUri: tburi.spec,
+ title: "The bird is the word!",
+ visits: [
+ { date: TIMESTAMP3, type: Ci.nsINavHistoryService.TRANSITION_TYPED },
+ ],
+ };
+ await applyEnsureNoFailures([record]);
+ await onVisitObserved;
+ Assert.ok(await store.itemExists(tbguid));
+ do_check_attribute_count(await store.getAllIDs(), 1);
+ let queryres = await PlacesUtils.history.fetch(tburi.spec, {
+ includeVisits: true,
+ });
+ Assert.equal(queryres.title, "The bird is the word!");
+ Assert.deepEqual(queryres.visits, [
+ {
+ date: new Date(TIMESTAMP3 / 1000),
+ transition: Ci.nsINavHistoryService.TRANSITION_TYPED,
+ },
+ ]);
+ await PlacesUtils.history.clear();
+});
+
+add_task(async function test_null_title() {
+ _(
+ "Make sure we handle a null title gracefully (it can happen in some cases, e.g. for resource:// URLs)"
+ );
+ let resguid = Utils.makeGUID();
+ let resuri = CommonUtils.makeURI("unknown://title");
+ let record = await store.createRecord(resguid);
+ record.cleartext = {
+ id: resguid,
+ histUri: resuri.spec,
+ title: null,
+ visits: [
+ { date: TIMESTAMP3, type: Ci.nsINavHistoryService.TRANSITION_TYPED },
+ ],
+ };
+ await applyEnsureNoFailures([record]);
+ do_check_attribute_count(await store.getAllIDs(), 1);
+
+ let queryres = await PlacesUtils.history.fetch(resuri.spec, {
+ includeVisits: true,
+ });
+ Assert.equal(queryres.title, "");
+ Assert.deepEqual(queryres.visits, [
+ {
+ date: new Date(TIMESTAMP3 / 1000),
+ transition: Ci.nsINavHistoryService.TRANSITION_TYPED,
+ },
+ ]);
+ await PlacesUtils.history.clear();
+});
+
+add_task(async function test_invalid_records() {
+ _("Make sure we handle invalid URLs in places databases gracefully.");
+ await PlacesUtils.withConnectionWrapper(
+ "test_invalid_record",
+ async function (db) {
+ await db.execute(
+ "INSERT INTO moz_places " +
+ "(url, url_hash, title, rev_host, visit_count, last_visit_date) " +
+ "VALUES ('invalid-uri', hash('invalid-uri'), 'Invalid URI', '.', 1, " +
+ TIMESTAMP3 +
+ ")"
+ );
+ // Add the corresponding visit to retain database coherence.
+ await db.execute(
+ "INSERT INTO moz_historyvisits " +
+ "(place_id, visit_date, visit_type, session) " +
+ "VALUES ((SELECT id FROM moz_places WHERE url_hash = hash('invalid-uri') AND url = 'invalid-uri'), " +
+ TIMESTAMP3 +
+ ", " +
+ Ci.nsINavHistoryService.TRANSITION_TYPED +
+ ", 1)"
+ );
+ }
+ );
+ do_check_attribute_count(await store.getAllIDs(), 1);
+
+ _("Make sure we report records with invalid URIs.");
+ let invalid_uri_guid = Utils.makeGUID();
+ let countTelemetry = new SyncedRecordsTelemetry();
+ let failed = await store.applyIncomingBatch(
+ [
+ {
+ id: invalid_uri_guid,
+ histUri: ":::::::::::::::",
+ title: "Doesn't have a valid URI",
+ visits: [
+ { date: TIMESTAMP3, type: Ci.nsINavHistoryService.TRANSITION_EMBED },
+ ],
+ },
+ ],
+ countTelemetry
+ );
+ Assert.equal(failed.length, 1);
+ Assert.equal(failed[0], invalid_uri_guid);
+ Assert.equal(
+ countTelemetry.incomingCounts.failedReasons[0].name,
+ "<URL> is not a valid URL."
+ );
+ Assert.equal(countTelemetry.incomingCounts.failedReasons[0].count, 1);
+
+ _("Make sure we handle records with invalid GUIDs gracefully (ignore).");
+ await applyEnsureNoFailures([
+ {
+ id: "invalid",
+ histUri: "http://invalid.guid/",
+ title: "Doesn't have a valid GUID",
+ visits: [
+ { date: TIMESTAMP3, type: Ci.nsINavHistoryService.TRANSITION_EMBED },
+ ],
+ },
+ ]);
+
+ _(
+ "Make sure we handle records with invalid visit codes or visit dates, gracefully ignoring those visits."
+ );
+ let no_date_visit_guid = Utils.makeGUID();
+ let no_type_visit_guid = Utils.makeGUID();
+ let invalid_type_visit_guid = Utils.makeGUID();
+ let non_integer_visit_guid = Utils.makeGUID();
+ countTelemetry = new SyncedRecordsTelemetry();
+ failed = await store.applyIncomingBatch(
+ [
+ {
+ id: no_date_visit_guid,
+ histUri: "http://no.date.visit/",
+ title: "Visit has no date",
+ visits: [{ type: Ci.nsINavHistoryService.TRANSITION_EMBED }],
+ },
+ {
+ id: no_type_visit_guid,
+ histUri: "http://no.type.visit/",
+ title: "Visit has no type",
+ visits: [{ date: TIMESTAMP3 }],
+ },
+ {
+ id: invalid_type_visit_guid,
+ histUri: "http://invalid.type.visit/",
+ title: "Visit has invalid type",
+ visits: [
+ {
+ date: TIMESTAMP3,
+ type: Ci.nsINavHistoryService.TRANSITION_LINK - 1,
+ },
+ ],
+ },
+ {
+ id: non_integer_visit_guid,
+ histUri: "http://non.integer.visit/",
+ title: "Visit has non-integer date",
+ visits: [
+ { date: 1234.567, type: Ci.nsINavHistoryService.TRANSITION_EMBED },
+ ],
+ },
+ ],
+ countTelemetry
+ );
+ Assert.equal(failed.length, 0);
+
+ // Make sure we can apply tombstones (both valid and invalid)
+ countTelemetry = new SyncedRecordsTelemetry();
+ failed = await store.applyIncomingBatch(
+ [
+ { id: no_date_visit_guid, deleted: true },
+ { id: "not-a-valid-guid", deleted: true },
+ ],
+ countTelemetry
+ );
+ Assert.deepEqual(failed, ["not-a-valid-guid"]);
+ Assert.equal(
+ countTelemetry.incomingCounts.failedReasons[0].name,
+ "<URL> is not a valid URL."
+ );
+
+ _("Make sure we handle records with javascript: URLs gracefully.");
+  // Note: applyEnsureNoFailures takes a single argument, so we don't pass
+  // countTelemetry here.
+  await applyEnsureNoFailures([
+    {
+      id: Utils.makeGUID(),
+      histUri: "javascript:''",
+      title: "javascript:''",
+      visits: [
+        { date: TIMESTAMP3, type: Ci.nsINavHistoryService.TRANSITION_EMBED },
+      ],
+    },
+  ]);
+
+ _("Make sure we handle records without any visits gracefully.");
+ await applyEnsureNoFailures([
+ {
+ id: Utils.makeGUID(),
+ histUri: "http://getfirebug.com",
+ title: "Get Firebug!",
+ visits: [],
+ },
+ ]);
+});
+
+add_task(async function test_unknowingly_invalid_records() {
+ _("Make sure we handle rejection of records by places gracefully.");
+ let oldCAU = store._canAddURI;
+ store._canAddURI = () => true;
+ try {
+ _("Make sure that when places rejects this record we record it as failed");
+ let guid = Utils.makeGUID();
+ let countTelemetry = new SyncedRecordsTelemetry();
+ let invalidRecord = await store.createRecord(guid);
+ invalidRecord.cleartext = {
+ id: guid,
+ histUri: "javascript:''",
+ title: "javascript:''",
+ visits: [
+ {
+ date: TIMESTAMP3,
+ type: Ci.nsINavHistoryService.TRANSITION_EMBED,
+ },
+ ],
+ };
+ let result = await store.applyIncomingBatch(
+ [invalidRecord],
+ countTelemetry
+ );
+ deepEqual(result, [guid]);
+ } finally {
+ store._canAddURI = oldCAU;
+ }
+});
+
+add_task(async function test_clamp_visit_dates() {
+ let futureVisitTime = Date.now() + 5 * 60 * 1000;
+ let recentVisitTime = Date.now() - 5 * 60 * 1000;
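+
+  // Expected clamping behavior, per the assertions below: unparseable dates
+  // are dropped, dates earlier than EARLIEST_BOOKMARK_TIMESTAMP are raised to
+  // it, and future dates are clamped to roughly "now".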
+
+ let recordA = await store.createRecord("visitAAAAAAA");
+ recordA.cleartext = {
+ id: "visitAAAAAAA",
+ histUri: "http://example.com/a",
+ title: "A",
+ visits: [
+ {
+ date: "invalidDate",
+ type: Ci.nsINavHistoryService.TRANSITION_LINK,
+ },
+ ],
+ };
+ let recordB = await store.createRecord("visitBBBBBBB");
+ recordB.cleartext = {
+ id: "visitBBBBBBB",
+ histUri: "http://example.com/b",
+ title: "B",
+ visits: [
+ {
+ date: 100,
+ type: Ci.nsINavHistoryService.TRANSITION_TYPED,
+ },
+ {
+ date: 250,
+ type: Ci.nsINavHistoryService.TRANSITION_TYPED,
+ },
+ {
+ date: recentVisitTime * 1000,
+ type: Ci.nsINavHistoryService.TRANSITION_TYPED,
+ },
+ ],
+ };
+ let recordC = await store.createRecord("visitCCCCCCC");
+ recordC.cleartext = {
+ id: "visitCCCCCCC",
+ histUri: "http://example.com/c",
+ title: "D",
+ visits: [
+ {
+ date: futureVisitTime * 1000,
+ type: Ci.nsINavHistoryService.TRANSITION_BOOKMARK,
+ },
+ ],
+ };
+ let recordD = await store.createRecord("visitDDDDDDD");
+ recordD.cleartext = {
+ id: "visitDDDDDDD",
+ histUri: "http://example.com/d",
+ title: "D",
+ visits: [
+ {
+ date: recentVisitTime * 1000,
+ type: Ci.nsINavHistoryService.TRANSITION_DOWNLOAD,
+ },
+ ],
+ };
+ await applyEnsureNoFailures([recordA, recordB, recordC, recordD]);
+
+ let visitsForA = await PlacesSyncUtils.history.fetchVisitsForURL(
+ "http://example.com/a"
+ );
+ deepEqual(visitsForA, [], "Should ignore visits with invalid dates");
+
+ let visitsForB = await PlacesSyncUtils.history.fetchVisitsForURL(
+ "http://example.com/b"
+ );
+ deepEqual(
+ visitsForB,
+ [
+ {
+ date: recentVisitTime * 1000,
+ type: Ci.nsINavHistoryService.TRANSITION_TYPED,
+ },
+ {
+ // We should clamp visit dates older than original Mosaic release.
+ date: PlacesSyncUtils.bookmarks.EARLIEST_BOOKMARK_TIMESTAMP * 1000,
+ type: Ci.nsINavHistoryService.TRANSITION_TYPED,
+ },
+ ],
+ "Should record clamped visit and valid visit for B"
+ );
+
+ let visitsForC = await PlacesSyncUtils.history.fetchVisitsForURL(
+ "http://example.com/c"
+ );
+ equal(visitsForC.length, 1, "Should record clamped future visit for C");
+ let visitDateForC = PlacesUtils.toDate(visitsForC[0].date);
+ ok(
+ isDateApproximately(visitDateForC, Date.now()),
+ "Should clamp future visit date for C to now"
+ );
+
+ let visitsForD = await PlacesSyncUtils.history.fetchVisitsForURL(
+ "http://example.com/d"
+ );
+ deepEqual(
+ visitsForD,
+ [
+ {
+ date: recentVisitTime * 1000,
+ type: Ci.nsINavHistoryService.TRANSITION_DOWNLOAD,
+ },
+ ],
+ "Should not clamp valid visit dates"
+ );
+});
+
+add_task(async function test_remove() {
+ _("Remove an existent record and a non-existent from the store.");
+ await applyEnsureNoFailures([
+ { id: fxguid, deleted: true },
+ { id: Utils.makeGUID(), deleted: true },
+ ]);
+ Assert.equal(false, await store.itemExists(fxguid));
+ let queryres = await PlacesUtils.history.fetch(fxuri.spec, {
+ includeVisits: true,
+ });
+ Assert.equal(null, queryres);
+
+ _("Make sure wipe works.");
+ await store.wipe();
+ do_check_empty(await store.getAllIDs());
+ queryres = await PlacesUtils.history.fetch(fxuri.spec, {
+ includeVisits: true,
+ });
+ Assert.equal(null, queryres);
+ queryres = await PlacesUtils.history.fetch(tburi.spec, {
+ includeVisits: true,
+ });
+ Assert.equal(null, queryres);
+});
+
+add_task(async function test_chunking() {
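+  // Shrink the per-insert visit limit so chunking kicks in with tiny fixtures.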
+ let mvpi = store.MAX_VISITS_PER_INSERT;
+ store.MAX_VISITS_PER_INSERT = 3;
+ let checkChunks = function (input, expected) {
+ let chunks = Array.from(store._generateChunks(input));
+ deepEqual(chunks, expected);
+ };
+ try {
+ checkChunks([{ visits: ["x"] }], [[{ visits: ["x"] }]]);
+
+ // 3 should still be one chunk.
+ checkChunks([{ visits: ["x", "x", "x"] }], [[{ visits: ["x", "x", "x"] }]]);
+
+ // 4 should still be one chunk as we don't split individual records.
+ checkChunks(
+ [{ visits: ["x", "x", "x", "x"] }],
+ [[{ visits: ["x", "x", "x", "x"] }]]
+ );
+
+ // 4 in the first and 1 in the second should be 2 chunks.
+ checkChunks(
+ [{ visits: ["x", "x", "x", "x"] }, { visits: ["x"] }],
+ // expected
+ [[{ visits: ["x", "x", "x", "x"] }], [{ visits: ["x"] }]]
+ );
+
+ // we put multiple records into chunks
+ checkChunks(
+ [
+ { visits: ["x", "x"] },
+ { visits: ["x"] },
+ { visits: ["x"] },
+ { visits: ["x", "x"] },
+ { visits: ["x", "x", "x", "x"] },
+ ],
+ // expected
+ [
+ [{ visits: ["x", "x"] }, { visits: ["x"] }],
+ [{ visits: ["x"] }, { visits: ["x", "x"] }],
+ [{ visits: ["x", "x", "x", "x"] }],
+ ]
+ );
+ } finally {
+ store.MAX_VISITS_PER_INSERT = mvpi;
+ }
+});
+
+add_task(async function test_getAllIDs_filters_file_uris() {
+ let uri = CommonUtils.makeURI("file:///Users/eoger/tps/config.json");
+ let visitAddedPromise = promiseVisit("added", uri);
+ await PlacesTestUtils.addVisits({
+ uri,
+ visitDate: Date.now() * 1000,
+ transition: PlacesUtils.history.TRANSITION_LINK,
+ });
+ await visitAddedPromise;
+
+ do_check_attribute_count(await store.getAllIDs(), 0);
+
+ await PlacesUtils.history.clear();
+});
+
+add_task(async function test_applyIncomingBatch_filters_file_uris() {
+ const guid = Utils.makeGUID();
+ let uri = CommonUtils.makeURI("file:///Users/eoger/tps/config.json");
+ await applyEnsureNoFailures([
+ {
+ id: guid,
+ histUri: uri.spec,
+ title: "TPS CONFIG",
+ visits: [
+ { date: TIMESTAMP3, type: Ci.nsINavHistoryService.TRANSITION_TYPED },
+ ],
+ },
+ ]);
+ Assert.equal(false, await store.itemExists(guid));
+ let queryres = await PlacesUtils.history.fetch(uri.spec, {
+ includeVisits: true,
+ });
+ Assert.equal(null, queryres);
+});
+
+add_task(async function cleanup() {
+ _("Clean up.");
+ await PlacesUtils.history.clear();
+});
diff --git a/services/sync/tests/unit/test_history_tracker.js b/services/sync/tests/unit/test_history_tracker.js
new file mode 100644
index 0000000000..6f351d6984
--- /dev/null
+++ b/services/sync/tests/unit/test_history_tracker.js
@@ -0,0 +1,251 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { PlacesDBUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/PlacesDBUtils.sys.mjs"
+);
+const { HistoryEngine } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/history.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+let engine;
+let tracker;
+
+add_task(async function setup() {
+ await Service.engineManager.clear();
+ await Service.engineManager.register(HistoryEngine);
+ engine = Service.engineManager.get("history");
+ tracker = engine._tracker;
+});
+
+async function verifyTrackerEmpty() {
+ let changes = await engine.pullNewChanges();
+ do_check_empty(changes);
+ equal(tracker.score, 0);
+}
+
+async function verifyTrackedCount(expected) {
+ let changes = await engine.pullNewChanges();
+ do_check_attribute_count(changes, expected);
+}
+
+async function verifyTrackedItems(tracked) {
+ let changes = await engine.pullNewChanges();
+ let trackedIDs = new Set(Object.keys(changes));
+ for (let guid of tracked) {
+ ok(guid in changes, `${guid} should be tracked`);
+ ok(changes[guid] > 0, `${guid} should have a modified time`);
+ trackedIDs.delete(guid);
+ }
+ equal(
+ trackedIDs.size,
+ 0,
+ `Unhandled tracked IDs: ${JSON.stringify(Array.from(trackedIDs))}`
+ );
+}
+
+async function resetTracker() {
+ await tracker.clearChangedIDs();
+ tracker.resetScore();
+}
+
+async function cleanup() {
+ await PlacesUtils.history.clear();
+ await resetTracker();
+ await tracker.stop();
+}
+
+add_task(async function test_empty() {
+ _("Verify we've got an empty, disabled tracker to work with.");
+ await verifyTrackerEmpty();
+ Assert.ok(!tracker._isTracking);
+
+ await cleanup();
+});
+
+add_task(async function test_not_tracking() {
+ _("Create history item. Won't show because we haven't started tracking yet");
+ await addVisit("not_tracking");
+ await verifyTrackerEmpty();
+
+ await cleanup();
+});
+
+add_task(async function test_start_tracking() {
+ _("Add hook for save completion.");
+ let savePromise = new Promise((resolve, reject) => {
+ let save = tracker._storage._save;
+ tracker._storage._save = async function () {
+ try {
+ await save.call(this);
+ resolve();
+ } catch (ex) {
+ reject(ex);
+ } finally {
+ tracker._storage._save = save;
+ }
+ };
+ });
+
+ _("Tell the tracker to start tracking changes.");
+ tracker.start();
+ let scorePromise = promiseOneObserver("weave:engine:score:updated");
+ await addVisit("start_tracking");
+ await scorePromise;
+
+ _("Score updated in test_start_tracking.");
+ await verifyTrackedCount(1);
+ Assert.equal(tracker.score, SCORE_INCREMENT_SMALL);
+
+ await savePromise;
+
+ _("changedIDs written to disk. Proceeding.");
+ await cleanup();
+});
+
+add_task(async function test_start_tracking_twice() {
+ _("Verifying preconditions.");
+ tracker.start();
+ await addVisit("start_tracking_twice1");
+ await verifyTrackedCount(1);
+ Assert.equal(tracker.score, SCORE_INCREMENT_SMALL);
+
+ _("Notifying twice won't do any harm.");
+ tracker.start();
+ let scorePromise = promiseOneObserver("weave:engine:score:updated");
+ await addVisit("start_tracking_twice2");
+ await scorePromise;
+
+ _("Score updated in test_start_tracking_twice.");
+ await verifyTrackedCount(2);
+ Assert.equal(tracker.score, 2 * SCORE_INCREMENT_SMALL);
+
+ await cleanup();
+});
+
+add_task(async function test_track_delete() {
+ _("Deletions are tracked.");
+
+ // This isn't present because we weren't tracking when it was visited.
+ await addVisit("track_delete");
+ let uri = CommonUtils.makeURI("http://getfirefox.com/track_delete");
+ let guid = await engine._store.GUIDForUri(uri.spec);
+ await verifyTrackerEmpty();
+
+ tracker.start();
+ let visitRemovedPromise = promiseVisit("removed", uri);
+ let scorePromise = promiseOneObserver("weave:engine:score:updated");
+ await PlacesUtils.history.remove(uri);
+ await Promise.all([scorePromise, visitRemovedPromise]);
+
+ await verifyTrackedItems([guid]);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+
+ await cleanup();
+});
+
+add_task(async function test_dont_track_expiration() {
+ _("Expirations are not tracked.");
+ let uriToRemove = await addVisit("to_remove");
+ let guidToRemove = await engine._store.GUIDForUri(uriToRemove.spec);
+
+ await resetTracker();
+ await verifyTrackerEmpty();
+
+ tracker.start();
+ let visitRemovedPromise = promiseVisit("removed", uriToRemove);
+ let scorePromise = promiseOneObserver("weave:engine:score:updated");
+
+ // Observe expiration.
+ Services.obs.addObserver(function onExpiration(aSubject, aTopic, aData) {
+ Services.obs.removeObserver(onExpiration, aTopic);
+ // Remove the remaining page to update its score.
+ PlacesUtils.history.remove(uriToRemove);
+ }, PlacesUtils.TOPIC_EXPIRATION_FINISHED);
+
+ // Force expiration of 1 entry.
+ Services.prefs.setIntPref("places.history.expiration.max_pages", 0);
+ Cc["@mozilla.org/places/expiration;1"]
+ .getService(Ci.nsIObserver)
+ .observe(null, "places-debug-start-expiration", 1);
+
+ await Promise.all([scorePromise, visitRemovedPromise]);
+ await verifyTrackedItems([guidToRemove]);
+
+ await cleanup();
+});
+
+add_task(async function test_stop_tracking() {
+ _("Let's stop tracking again.");
+ await tracker.stop();
+ await addVisit("stop_tracking");
+ await verifyTrackerEmpty();
+
+ await cleanup();
+});
+
+add_task(async function test_stop_tracking_twice() {
+ await tracker.stop();
+ await addVisit("stop_tracking_twice1");
+
+ _("Notifying twice won't do any harm.");
+ await tracker.stop();
+ await addVisit("stop_tracking_twice2");
+ await verifyTrackerEmpty();
+
+ await cleanup();
+});
+
+add_task(async function test_filter_file_uris() {
+ tracker.start();
+
+ let uri = CommonUtils.makeURI("file:///Users/eoger/tps/config.json");
+ let visitAddedPromise = promiseVisit("added", uri);
+ await PlacesTestUtils.addVisits({
+ uri,
+ visitDate: Date.now() * 1000,
+ transition: PlacesUtils.history.TRANSITION_LINK,
+ });
+ await visitAddedPromise;
+
+ await verifyTrackerEmpty();
+ await tracker.stop();
+ await cleanup();
+});
+
+add_task(async function test_filter_hidden() {
+ tracker.start();
+
+ _("Add visit; should be hidden by the redirect");
+ let hiddenURI = await addVisit("hidden");
+ let hiddenGUID = await engine._store.GUIDForUri(hiddenURI.spec);
+ _(`Hidden visit GUID: ${hiddenGUID}`);
+
+ _("Add redirect visit; should be tracked");
+ let trackedURI = await addVisit(
+ "redirect",
+ hiddenURI.spec,
+ PlacesUtils.history.TRANSITION_REDIRECT_PERMANENT
+ );
+ let trackedGUID = await engine._store.GUIDForUri(trackedURI.spec);
+ _(`Tracked visit GUID: ${trackedGUID}`);
+
+ _("Add visit for framed link; should be ignored");
+ let embedURI = await addVisit(
+ "framed_link",
+ null,
+ PlacesUtils.history.TRANSITION_FRAMED_LINK
+ );
+ let embedGUID = await engine._store.GUIDForUri(embedURI.spec);
+ _(`Framed link visit GUID: ${embedGUID}`);
+
+ _("Run Places maintenance to mark redirect visit as hidden");
+ await PlacesDBUtils.maintenanceOnIdle();
+
+ await verifyTrackedItems([trackedGUID]);
+
+ await cleanup();
+});
diff --git a/services/sync/tests/unit/test_hmac_error.js b/services/sync/tests/unit/test_hmac_error.js
new file mode 100644
index 0000000000..26dbc12dea
--- /dev/null
+++ b/services/sync/tests/unit/test_hmac_error.js
@@ -0,0 +1,250 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+// Track HMAC error counts.
+var hmacErrorCount = 0;
+(function () {
+ let hHE = Service.handleHMACEvent;
+ Service.handleHMACEvent = async function () {
+ hmacErrorCount++;
+ return hHE.call(Service);
+ };
+})();
+
+async function shared_setup() {
+ enableValidationPrefs();
+ syncTestLogging();
+
+ hmacErrorCount = 0;
+
+ let clientsEngine = Service.clientsEngine;
+ let clientsSyncID = await clientsEngine.resetLocalSyncID();
+
+ // Make sure RotaryEngine is the only one we sync.
+ let { engine, syncID, tracker } = await registerRotaryEngine();
+ await engine.setLastSync(123); // Needs to be non-zero so that tracker is queried.
+ engine._store.items = {
+ flying: "LNER Class A3 4472",
+ scotsman: "Flying Scotsman",
+ };
+ await tracker.addChangedID("scotsman", 0);
+ Assert.equal(1, Service.engineManager.getEnabled().length);
+
+ let engines = {
+ rotary: { version: engine.version, syncID },
+ clients: { version: clientsEngine.version, syncID: clientsSyncID },
+ };
+
+ // Common server objects.
+ let global = new ServerWBO("global", { engines });
+ let keysWBO = new ServerWBO("keys");
+ let rotaryColl = new ServerCollection({}, true);
+ let clientsColl = new ServerCollection({}, true);
+
+ return [engine, rotaryColl, clientsColl, keysWBO, global, tracker];
+}
+
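+// A transient 404 when fetching crypto/keys mid-sync must not surface as an
+// HMAC error; the sync should recover and hmacErrorCount should stay at 0.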
+add_task(async function hmac_error_during_404() {
+ _("Attempt to replicate the HMAC error setup.");
+ let [engine, rotaryColl, clientsColl, keysWBO, global, tracker] =
+ await shared_setup();
+
+ // Hand out 404s for crypto/keys.
+ let keysHandler = keysWBO.handler();
+ let key404Counter = 0;
+ let keys404Handler = function (request, response) {
+ if (key404Counter > 0) {
+ let body = "Not Found";
+ response.setStatusLine(request.httpVersion, 404, body);
+ response.bodyOutputStream.write(body, body.length);
+ key404Counter--;
+ return;
+ }
+ keysHandler(request, response);
+ };
+
+ let collectionsHelper = track_collections_helper();
+ let upd = collectionsHelper.with_updated_collection;
+ let handlers = {
+ "/1.1/foo/info/collections": collectionsHelper.handler,
+ "/1.1/foo/storage/meta/global": upd("meta", global.handler()),
+ "/1.1/foo/storage/crypto/keys": upd("crypto", keys404Handler),
+ "/1.1/foo/storage/clients": upd("clients", clientsColl.handler()),
+ "/1.1/foo/storage/rotary": upd("rotary", rotaryColl.handler()),
+ };
+
+ let server = sync_httpd_setup(handlers);
+ // Do not instantiate SyncTestingInfrastructure; we need real crypto.
+ await configureIdentity({ username: "foo" }, server);
+ await Service.login();
+
+ try {
+ _("Syncing.");
+ await sync_and_validate_telem();
+
+ _(
+ "Partially resetting client, as if after a restart, and forcing redownload."
+ );
+ Service.collectionKeys.clear();
+ await engine.setLastSync(0); // So that we redownload records.
+ key404Counter = 1;
+ _("---------------------------");
+ await sync_and_validate_telem();
+ _("---------------------------");
+
+ // Two rotary items, one client record... no errors.
+ Assert.equal(hmacErrorCount, 0);
+ } finally {
+ await tracker.clearChangedIDs();
+ await Service.engineManager.unregister(engine);
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ Service.recordManager.clearCache();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function hmac_error_during_node_reassignment() {
+ _("Attempt to replicate an HMAC error during node reassignment.");
+ let [engine, rotaryColl, clientsColl, keysWBO, global, tracker] =
+ await shared_setup();
+
+ let collectionsHelper = track_collections_helper();
+ let upd = collectionsHelper.with_updated_collection;
+
+ // We'll provide a 401 mid-way through the sync. This function
+ // simulates shifting to a node which has no data.
+ function on401() {
+ _("Deleting server data...");
+ global.delete();
+ rotaryColl.delete();
+ keysWBO.delete();
+ clientsColl.delete();
+ delete collectionsHelper.collections.rotary;
+ delete collectionsHelper.collections.crypto;
+ delete collectionsHelper.collections.clients;
+ _("Deleted server data.");
+ }
+
+ let should401 = false;
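+  // Wraps a handler so that the first non-DELETE request is answered with a
+  // 401 and the server-side data is wiped, simulating node reassignment.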
+ function upd401(coll, handler) {
+ return function (request, response) {
+ if (should401 && request.method != "DELETE") {
+ on401();
+ should401 = false;
+ let body = '"reassigned!"';
+ response.setStatusLine(request.httpVersion, 401, "Node reassignment.");
+ response.bodyOutputStream.write(body, body.length);
+ return;
+ }
+ handler(request, response);
+ };
+ }
+
+ let handlers = {
+ "/1.1/foo/info/collections": collectionsHelper.handler,
+ "/1.1/foo/storage/meta/global": upd("meta", global.handler()),
+ "/1.1/foo/storage/crypto/keys": upd("crypto", keysWBO.handler()),
+ "/1.1/foo/storage/clients": upd401("clients", clientsColl.handler()),
+ "/1.1/foo/storage/rotary": upd("rotary", rotaryColl.handler()),
+ };
+
+ let server = sync_httpd_setup(handlers);
+ // Do not instantiate SyncTestingInfrastructure; we need real crypto.
+ await configureIdentity({ username: "foo" }, server);
+
+ _("Syncing.");
+ // First hit of clients will 401. This will happen after meta/global and
+ // keys -- i.e., in the middle of the sync, but before RotaryEngine.
+ should401 = true;
+
+ // Use observers to perform actions when our sync finishes.
+ // This allows us to observe the automatic next-tick sync that occurs after
+ // an abort.
+ function onSyncError() {
+ do_throw("Should not get a sync error!");
+ }
+ let onSyncFinished = function () {};
+ let obs = {
+ observe: function observe(subject, topic, data) {
+ switch (topic) {
+ case "weave:service:sync:error":
+ onSyncError();
+ break;
+ case "weave:service:sync:finish":
+ onSyncFinished();
+ break;
+ }
+ },
+ };
+
+ Svc.Obs.add("weave:service:sync:finish", obs);
+ Svc.Obs.add("weave:service:sync:error", obs);
+
+ // This kicks off the actual test. Split into a function here to allow this
+ // source file to broadly follow actual execution order.
+ async function onwards() {
+ _("== Invoking first sync.");
+ await Service.sync();
+ _("We should not simultaneously have data but no keys on the server.");
+ let hasData = rotaryColl.wbo("flying") || rotaryColl.wbo("scotsman");
+ let hasKeys = keysWBO.modified;
+
+ _("We correctly handle 401s by aborting the sync and starting again.");
+ Assert.ok(!hasData == !hasKeys);
+
+ _("Be prepared for the second (automatic) sync...");
+ }
+
+ _("Make sure that syncing again causes recovery.");
+ let callbacksPromise = new Promise(resolve => {
+ onSyncFinished = function () {
+ _("== First sync done.");
+ _("---------------------------");
+ onSyncFinished = function () {
+ _("== Second (automatic) sync done.");
+ let hasData = rotaryColl.wbo("flying") || rotaryColl.wbo("scotsman");
+ let hasKeys = keysWBO.modified;
+ Assert.ok(!hasData == !hasKeys);
+
+ // Kick off another sync. Can't just call it, because we're inside the
+ // lock...
+ (async () => {
+ await Async.promiseYield();
+ _("Now a fresh sync will get no HMAC errors.");
+ _(
+ "Partially resetting client, as if after a restart, and forcing redownload."
+ );
+ Service.collectionKeys.clear();
+ await engine.setLastSync(0);
+ hmacErrorCount = 0;
+
+ onSyncFinished = async function () {
+ // Two rotary items, one client record... no errors.
+ Assert.equal(hmacErrorCount, 0);
+
+ Svc.Obs.remove("weave:service:sync:finish", obs);
+ Svc.Obs.remove("weave:service:sync:error", obs);
+
+ await tracker.clearChangedIDs();
+ await Service.engineManager.unregister(engine);
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ Service.recordManager.clearCache();
+ server.stop(resolve);
+ };
+
+ Service.sync();
+ })().catch(console.error);
+ };
+ };
+ });
+ await onwards();
+ await callbacksPromise;
+});
diff --git a/services/sync/tests/unit/test_httpd_sync_server.js b/services/sync/tests/unit/test_httpd_sync_server.js
new file mode 100644
index 0000000000..6ac8ff5e04
--- /dev/null
+++ b/services/sync/tests/unit/test_httpd_sync_server.js
@@ -0,0 +1,250 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+add_test(function test_creation() {
+ // Explicit callback for this one.
+ let server = new SyncServer(Object.create(SyncServerCallback));
+ Assert.ok(!!server); // Just so we have a check.
+ server.start(null, function () {
+ _("Started on " + server.port);
+ server.stop(run_next_test);
+ });
+});
+
+add_test(function test_url_parsing() {
+ let server = new SyncServer();
+
+ // Check that we can parse a WBO URI.
+ let parts = server.pathRE.exec("/1.1/johnsmith/storage/crypto/keys");
+ let [all, version, username, first, rest] = parts;
+ Assert.equal(all, "/1.1/johnsmith/storage/crypto/keys");
+ Assert.equal(version, "1.1");
+ Assert.equal(username, "johnsmith");
+ Assert.equal(first, "storage");
+ Assert.equal(rest, "crypto/keys");
+ Assert.equal(null, server.pathRE.exec("/nothing/else"));
+
+ // Check that we can parse a collection URI.
+ parts = server.pathRE.exec("/1.1/johnsmith/storage/crypto");
+ [all, version, username, first, rest] = parts;
+ Assert.equal(all, "/1.1/johnsmith/storage/crypto");
+ Assert.equal(version, "1.1");
+ Assert.equal(username, "johnsmith");
+ Assert.equal(first, "storage");
+ Assert.equal(rest, "crypto");
+
+ // We don't allow trailing slash on storage URI.
+ parts = server.pathRE.exec("/1.1/johnsmith/storage/");
+ Assert.equal(parts, undefined);
+
+ // storage alone is a valid request.
+ parts = server.pathRE.exec("/1.1/johnsmith/storage");
+ [all, version, username, first, rest] = parts;
+ Assert.equal(all, "/1.1/johnsmith/storage");
+ Assert.equal(version, "1.1");
+ Assert.equal(username, "johnsmith");
+ Assert.equal(first, "storage");
+ Assert.equal(rest, undefined);
+
+ parts = server.storageRE.exec("storage");
+ let collection;
+ [all, , collection] = parts;
+ Assert.equal(all, "storage");
+ Assert.equal(collection, undefined);
+
+ run_next_test();
+});
+
+const { RESTRequest } = ChromeUtils.importESModule(
+ "resource://services-common/rest.sys.mjs"
+);
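+// Builds a RESTRequest for a path on the local test server. baseURI already
+// ends with a slash, so we trim it before appending the path.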
+function localRequest(server, path) {
+ _("localRequest: " + path);
+ let url = server.baseURI.substr(0, server.baseURI.length - 1) + path;
+ _("url: " + url);
+ return new RESTRequest(url);
+}
+
+add_task(async function test_basic_http() {
+ let server = new SyncServer();
+ server.registerUser("john", "password");
+ Assert.ok(server.userExists("john"));
+ server.start();
+ _("Started on " + server.port);
+
+ let req = localRequest(server, "/1.1/john/storage/crypto/keys");
+ _("req is " + req);
+ // Shouldn't reject, beyond that we don't care.
+ await req.get();
+
+ await promiseStopServer(server);
+});
+
+add_task(async function test_info_collections() {
+ let server = new SyncServer(Object.create(SyncServerCallback));
+ function responseHasCorrectHeaders(r) {
+ Assert.equal(r.status, 200);
+ Assert.equal(r.headers["content-type"], "application/json");
+ Assert.ok("x-weave-timestamp" in r.headers);
+ }
+
+ server.registerUser("john", "password");
+ server.start();
+
+ let req = localRequest(server, "/1.1/john/info/collections");
+ await req.get();
+ responseHasCorrectHeaders(req.response);
+ Assert.equal(req.response.body, "{}");
+
+ let putReq = localRequest(server, "/1.1/john/storage/crypto/keys");
+ let payload = JSON.stringify({ foo: "bar" });
+ let putResp = await putReq.put(payload);
+
+ responseHasCorrectHeaders(putResp);
+
+ let putResponseBody = putResp.body;
+ _("PUT response body: " + JSON.stringify(putResponseBody));
+
+ // When we PUT something to crypto/keys, "crypto" appears in the response.
+ req = localRequest(server, "/1.1/john/info/collections");
+
+ await req.get();
+ responseHasCorrectHeaders(req.response);
+ let expectedColl = server.getCollection("john", "crypto");
+ Assert.ok(!!expectedColl);
+ let modified = expectedColl.timestamp;
+ Assert.ok(modified > 0);
+ Assert.equal(putResponseBody, modified);
+ Assert.equal(JSON.parse(req.response.body).crypto, modified);
+
+ await promiseStopServer(server);
+});
+
+add_task(async function test_storage_request() {
+ let keysURL = "/1.1/john/storage/crypto/keys?foo=bar";
+ let foosURL = "/1.1/john/storage/crypto/foos";
+ let storageURL = "/1.1/john/storage";
+
+ let server = new SyncServer();
+ let creation = server.timestamp();
+ server.registerUser("john", "password");
+
+ server.createContents("john", {
+ crypto: { foos: { foo: "bar" } },
+ });
+ let coll = server.user("john").collection("crypto");
+ Assert.ok(!!coll);
+
+ _("We're tracking timestamps.");
+ Assert.ok(coll.timestamp >= creation);
+
+ async function retrieveWBONotExists() {
+ let req = localRequest(server, keysURL);
+ let response = await req.get();
+ _("Body is " + response.body);
+ _("Modified is " + response.newModified);
+ Assert.equal(response.status, 404);
+ Assert.equal(response.body, "Not found");
+ }
+
+ async function retrieveWBOExists() {
+ let req = localRequest(server, foosURL);
+ let response = await req.get();
+ _("Body is " + response.body);
+ _("Modified is " + response.newModified);
+ let parsedBody = JSON.parse(response.body);
+ Assert.equal(parsedBody.id, "foos");
+ Assert.equal(parsedBody.modified, coll.wbo("foos").modified);
+ Assert.equal(JSON.parse(parsedBody.payload).foo, "bar");
+ }
+
+ async function deleteWBONotExists() {
+ let req = localRequest(server, keysURL);
+ server.callback.onItemDeleted = function (username, collection, wboID) {
+ do_throw("onItemDeleted should not have been called.");
+ };
+
+ let response = await req.delete();
+
+ _("Body is " + response.body);
+ _("Modified is " + response.newModified);
+ Assert.equal(response.status, 200);
+ delete server.callback.onItemDeleted;
+ }
+
+ async function deleteWBOExists() {
+ let req = localRequest(server, foosURL);
+ server.callback.onItemDeleted = function (username, collection, wboID) {
+ _("onItemDeleted called for " + collection + "/" + wboID);
+ delete server.callback.onItemDeleted;
+ Assert.equal(username, "john");
+ Assert.equal(collection, "crypto");
+ Assert.equal(wboID, "foos");
+ };
+ await req.delete();
+ _("Body is " + req.response.body);
+ _("Modified is " + req.response.newModified);
+ Assert.equal(req.response.status, 200);
+ }
+
+ async function deleteStorage() {
+ _("Testing DELETE on /storage.");
+ let now = server.timestamp();
+ _("Timestamp: " + now);
+ let req = localRequest(server, storageURL);
+ await req.delete();
+
+ _("Body is " + req.response.body);
+ _("Modified is " + req.response.newModified);
+ let parsedBody = JSON.parse(req.response.body);
+ Assert.ok(parsedBody >= now);
+ do_check_empty(server.users.john.collections);
+ }
+
+ async function getStorageFails() {
+ _("Testing that GET on /storage fails.");
+ let req = localRequest(server, storageURL);
+ await req.get();
+ Assert.equal(req.response.status, 405);
+ Assert.equal(req.response.headers.allow, "DELETE");
+ }
+
+ async function getMissingCollectionWBO() {
+ _("Testing that fetching a WBO from an on-existent collection 404s.");
+ let req = localRequest(server, storageURL + "/foobar/baz");
+ await req.get();
+ Assert.equal(req.response.status, 404);
+ }
+
+ server.start(null);
+
+ await retrieveWBONotExists();
+ await retrieveWBOExists();
+ await deleteWBOExists();
+ await deleteWBONotExists();
+ await getStorageFails();
+ await getMissingCollectionWBO();
+ await deleteStorage();
+
+ await promiseStopServer(server);
+});
+
+add_task(async function test_x_weave_records() {
+ let server = new SyncServer();
+ server.registerUser("john", "password");
+
+ server.createContents("john", {
+ crypto: { foos: { foo: "bar" }, bars: { foo: "baz" } },
+ });
+ server.start();
+
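+  // Single WBO fetches don't include an x-weave-records header.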
+ let wbo = localRequest(server, "/1.1/john/storage/crypto/foos");
+ await wbo.get();
+ Assert.equal(false, "x-weave-records" in wbo.response.headers);
+ let col = localRequest(server, "/1.1/john/storage/crypto");
+ await col.get();
+ // Collection fetches do.
+ Assert.equal(col.response.headers["x-weave-records"], "2");
+
+ await promiseStopServer(server);
+});
diff --git a/services/sync/tests/unit/test_interval_triggers.js b/services/sync/tests/unit/test_interval_triggers.js
new file mode 100644
index 0000000000..6f2821ec45
--- /dev/null
+++ b/services/sync/tests/unit/test_interval_triggers.js
@@ -0,0 +1,472 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+Svc.PrefBranch.setStringPref("registerEngines", "");
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+let scheduler;
+let clientsEngine;
+
+async function sync_httpd_setup() {
+ let clientsSyncID = await clientsEngine.resetLocalSyncID();
+ let global = new ServerWBO("global", {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ engines: {
+ clients: { version: clientsEngine.version, syncID: clientsSyncID },
+ },
+ });
+ let clientsColl = new ServerCollection({}, true);
+
+ // Tracking info/collections.
+ let collectionsHelper = track_collections_helper();
+ let upd = collectionsHelper.with_updated_collection;
+
+ return httpd_setup({
+ "/1.1/johndoe/storage/meta/global": upd("meta", global.handler()),
+ "/1.1/johndoe/info/collections": collectionsHelper.handler,
+ "/1.1/johndoe/storage/crypto/keys": upd(
+ "crypto",
+ new ServerWBO("keys").handler()
+ ),
+ "/1.1/johndoe/storage/clients": upd("clients", clientsColl.handler()),
+ });
+}
+
+async function setUp(server) {
+ syncTestLogging();
+ await configureIdentity({ username: "johndoe" }, server);
+ await generateNewKeys(Service.collectionKeys);
+ let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
+ await serverKeys.encrypt(Service.identity.syncKeyBundle);
+ await serverKeys.upload(Service.resource(Service.cryptoKeysURL));
+}
+
+add_task(async function setup() {
+ scheduler = Service.scheduler;
+ clientsEngine = Service.clientsEngine;
+
+ // Don't remove stale clients when syncing. This is a test-only workaround
+ // that lets us add clients directly to the store, without losing them on
+ // the next sync.
+ clientsEngine._removeRemoteClient = async id => {};
+});
+
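+// adjustSyncInterval decision table exercised by the tasks below:
+//   numClients <= 1                             -> singleDeviceInterval
+//   idle && numClients > 1                      -> idleInterval
+//   !idle && numClients > 1 && !incoming items  -> activeInterval
+//   !idle && numClients > 1 && incoming items   -> immediateInterval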
+add_task(async function test_successful_sync_adjustSyncInterval() {
+ enableValidationPrefs();
+
+ _("Test successful sync calling adjustSyncInterval");
+ let syncSuccesses = 0;
+ function onSyncFinish() {
+ _("Sync success.");
+ syncSuccesses++;
+ }
+ Svc.Obs.add("weave:service:sync:finish", onSyncFinish);
+
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ // Confirm defaults
+ Assert.ok(!scheduler.idle);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+ Assert.ok(!scheduler.hasIncomingItems);
+
+ _("Test as long as numClients <= 1 our sync interval is SINGLE_USER.");
+ // idle == true && numClients <= 1 && hasIncomingItems == false
+ scheduler.idle = true;
+ await Service.sync();
+ Assert.equal(syncSuccesses, 1);
+ Assert.ok(scheduler.idle);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.ok(!scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+
+ // idle == false && numClients <= 1 && hasIncomingItems == false
+ scheduler.idle = false;
+ await Service.sync();
+ Assert.equal(syncSuccesses, 2);
+ Assert.ok(!scheduler.idle);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.ok(!scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+
+ // idle == false && numClients <= 1 && hasIncomingItems == true
+ scheduler.hasIncomingItems = true;
+ await Service.sync();
+ Assert.equal(syncSuccesses, 3);
+ Assert.ok(!scheduler.idle);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.ok(scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+
+ // idle == true && numClients <= 1 && hasIncomingItems == true
+ scheduler.idle = true;
+ await Service.sync();
+ Assert.equal(syncSuccesses, 4);
+ Assert.ok(scheduler.idle);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.ok(scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+
+ _(
+ "Test as long as idle && numClients > 1 our sync interval is idleInterval."
+ );
+ // idle == true && numClients > 1 && hasIncomingItems == true
+ await Service.clientsEngine._store.create({
+ id: "foo",
+ cleartext: { name: "bar", type: "mobile" },
+ });
+ await Service.sync();
+ Assert.equal(syncSuccesses, 5);
+ Assert.ok(scheduler.idle);
+ Assert.ok(scheduler.numClients > 1);
+ Assert.ok(scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.idleInterval);
+
+ // idle == true && numClients > 1 && hasIncomingItems == false
+ scheduler.hasIncomingItems = false;
+ await Service.sync();
+ Assert.equal(syncSuccesses, 6);
+ Assert.ok(scheduler.idle);
+ Assert.ok(scheduler.numClients > 1);
+ Assert.ok(!scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.idleInterval);
+
+ _("Test non-idle, numClients > 1, no incoming items => activeInterval.");
+ // idle == false && numClients > 1 && hasIncomingItems == false
+ scheduler.idle = false;
+ await Service.sync();
+ Assert.equal(syncSuccesses, 7);
+ Assert.ok(!scheduler.idle);
+ Assert.ok(scheduler.numClients > 1);
+ Assert.ok(!scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.activeInterval);
+
+ _("Test non-idle, numClients > 1, incoming items => immediateInterval.");
+ // idle == false && numClients > 1 && hasIncomingItems == true
+ scheduler.hasIncomingItems = true;
+ await Service.sync();
+ Assert.equal(syncSuccesses, 8);
+ Assert.ok(!scheduler.idle);
+ Assert.ok(scheduler.numClients > 1);
+ Assert.ok(!scheduler.hasIncomingItems); // gets reset to false
+ Assert.equal(scheduler.syncInterval, scheduler.immediateInterval);
+
+ Svc.Obs.remove("weave:service:sync:finish", onSyncFinish);
+ await Service.startOver();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_unsuccessful_sync_adjustSyncInterval() {
+ enableValidationPrefs();
+
+ _("Test unsuccessful sync calling adjustSyncInterval");
+
+ let syncFailures = 0;
+ function onSyncError() {
+ _("Sync error.");
+ syncFailures++;
+ }
+ Svc.Obs.add("weave:service:sync:error", onSyncError);
+
+ _("Test unsuccessful sync calls adjustSyncInterval");
+ // Force sync to fail.
+ Svc.PrefBranch.setStringPref("firstSync", "notReady");
+
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ // Confirm defaults
+ Assert.ok(!scheduler.idle);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+ Assert.ok(!scheduler.hasIncomingItems);
+
+ _("Test as long as numClients <= 1 our sync interval is SINGLE_USER.");
+ // idle == true && numClients <= 1 && hasIncomingItems == false
+ scheduler.idle = true;
+ await Service.sync();
+ Assert.equal(syncFailures, 1);
+ Assert.ok(scheduler.idle);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.ok(!scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+
+ // idle == false && numClients <= 1 && hasIncomingItems == false
+ scheduler.idle = false;
+ await Service.sync();
+ Assert.equal(syncFailures, 2);
+ Assert.ok(!scheduler.idle);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.ok(!scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+
+ // idle == false && numClients <= 1 && hasIncomingItems == true
+ scheduler.hasIncomingItems = true;
+ await Service.sync();
+ Assert.equal(syncFailures, 3);
+ Assert.ok(!scheduler.idle);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.ok(scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+
+ // idle == true && numClients <= 1 && hasIncomingItems == true
+ scheduler.idle = true;
+ await Service.sync();
+ Assert.equal(syncFailures, 4);
+ Assert.ok(scheduler.idle);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.ok(scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+
+ _(
+ "Test as long as idle && numClients > 1 our sync interval is idleInterval."
+ );
+ // idle == true && numClients > 1 && hasIncomingItems == true
+ Svc.PrefBranch.setIntPref("clients.devices.mobile", 2);
+ scheduler.updateClientMode();
+
+ await Service.sync();
+ Assert.equal(syncFailures, 5);
+ Assert.ok(scheduler.idle);
+ Assert.ok(scheduler.numClients > 1);
+ Assert.ok(scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.idleInterval);
+
+ // idle == true && numClients > 1 && hasIncomingItems == false
+ scheduler.hasIncomingItems = false;
+ await Service.sync();
+ Assert.equal(syncFailures, 6);
+ Assert.ok(scheduler.idle);
+ Assert.ok(scheduler.numClients > 1);
+ Assert.ok(!scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.idleInterval);
+
+ _("Test non-idle, numClients > 1, no incoming items => activeInterval.");
+ // idle == false && numClients > 1 && hasIncomingItems == false
+ scheduler.idle = false;
+ await Service.sync();
+ Assert.equal(syncFailures, 7);
+ Assert.ok(!scheduler.idle);
+ Assert.ok(scheduler.numClients > 1);
+ Assert.ok(!scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.activeInterval);
+
+ _("Test non-idle, numClients > 1, incoming items => immediateInterval.");
+ // idle == false && numClients > 1 && hasIncomingItems == true
+ scheduler.hasIncomingItems = true;
+ await Service.sync();
+ Assert.equal(syncFailures, 8);
+ Assert.ok(!scheduler.idle);
+ Assert.ok(scheduler.numClients > 1);
+ Assert.ok(!scheduler.hasIncomingItems); // gets reset to false
+ Assert.equal(scheduler.syncInterval, scheduler.immediateInterval);
+
+ await Service.startOver();
+ Svc.Obs.remove("weave:service:sync:error", onSyncError);
+ await promiseStopServer(server);
+});
+
+add_task(async function test_back_triggers_sync() {
+ enableValidationPrefs();
+
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ // Single device: no sync triggered.
+ scheduler.idle = true;
+ scheduler.observe(
+ null,
+ "active",
+ Svc.PrefBranch.getIntPref("scheduler.idleTime")
+ );
+ Assert.ok(!scheduler.idle);
+
+ // Multiple devices: sync is triggered.
+ Svc.PrefBranch.setIntPref("clients.devices.mobile", 2);
+ scheduler.updateClientMode();
+
+ let promiseDone = promiseOneObserver("weave:service:sync:finish");
+
+ scheduler.idle = true;
+ scheduler.observe(
+ null,
+ "active",
+ Svc.PrefBranch.getIntPref("scheduler.idleTime")
+ );
+ Assert.ok(!scheduler.idle);
+ await promiseDone;
+
+ Service.recordManager.clearCache();
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ scheduler.setDefaults();
+ await clientsEngine.resetClient();
+
+ await Service.startOver();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_adjust_interval_on_sync_error() {
+ enableValidationPrefs();
+
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ let syncFailures = 0;
+ function onSyncError() {
+ _("Sync error.");
+ syncFailures++;
+ }
+ Svc.Obs.add("weave:service:sync:error", onSyncError);
+
+ _("Test unsuccessful sync updates client mode & sync intervals");
+ // Force a sync fail.
+ Svc.PrefBranch.setStringPref("firstSync", "notReady");
+
+ Assert.equal(syncFailures, 0);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+
+ Svc.PrefBranch.setIntPref("clients.devices.mobile", 2);
+ await Service.sync();
+
+ Assert.equal(syncFailures, 1);
+ Assert.ok(scheduler.numClients > 1);
+ Assert.equal(scheduler.syncInterval, scheduler.activeInterval);
+
+ Svc.Obs.remove("weave:service:sync:error", onSyncError);
+ await Service.startOver();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_bug671378_scenario() {
+ enableValidationPrefs();
+
+ // Test scenario similar to bug 671378. This bug appeared when a score
+ // update occurred that wasn't large enough to trigger a sync so
+ // scheduleNextSync() was called without a time interval parameter,
+ // setting nextSync to a non-zero value and preventing the timer from
+ // being adjusted in the next call to scheduleNextSync().
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ let syncSuccesses = 0;
+ function onSyncFinish() {
+ _("Sync success.");
+ syncSuccesses++;
+ }
+ Svc.Obs.add("weave:service:sync:finish", onSyncFinish);
+
+ // After first sync call, syncInterval & syncTimer are singleDeviceInterval.
+ await Service.sync();
+ Assert.equal(syncSuccesses, 1);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+ Assert.equal(scheduler.syncTimer.delay, scheduler.singleDeviceInterval);
+
+ let promiseDone = new Promise(resolve => {
+ // Wrap scheduleNextSync so we are notified when it is finished.
+ scheduler._scheduleNextSync = scheduler.scheduleNextSync;
+ scheduler.scheduleNextSync = function () {
+ scheduler._scheduleNextSync();
+
+ // Check on sync:finish scheduleNextSync sets the appropriate
+ // syncInterval and syncTimer values.
+ if (syncSuccesses == 2) {
+ Assert.notEqual(scheduler.nextSync, 0);
+ Assert.equal(scheduler.syncInterval, scheduler.activeInterval);
+ Assert.ok(scheduler.syncTimer.delay <= scheduler.activeInterval);
+
+ scheduler.scheduleNextSync = scheduler._scheduleNextSync;
+ Svc.Obs.remove("weave:service:sync:finish", onSyncFinish);
+ Service.startOver().then(() => {
+ server.stop(resolve);
+ });
+ }
+ };
+ });
+
+  // Set nextSync != 0.
+  // syncInterval still hasn't been set by the call to updateClientMode.
+  // Explicitly invoke scheduleNextSync during a sync
+  // (to imitate a score update that isn't big enough to trigger a sync).
+ Svc.Obs.add("weave:service:sync:start", function onSyncStart() {
+ // Wait for other sync:start observers to be called so that
+ // nextSync is set to 0.
+ CommonUtils.nextTick(function () {
+ Svc.Obs.remove("weave:service:sync:start", onSyncStart);
+
+ scheduler.scheduleNextSync();
+ Assert.notEqual(scheduler.nextSync, 0);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+ Assert.equal(scheduler.syncTimer.delay, scheduler.singleDeviceInterval);
+ });
+ });
+
+ await Service.clientsEngine._store.create({
+ id: "foo",
+ cleartext: { name: "bar", type: "mobile" },
+ });
+ await Service.sync();
+ await promiseDone;
+});
+
+add_task(async function test_adjust_timer_larger_syncInterval() {
+ _(
+ "Test syncInterval > current timout period && nextSync != 0, syncInterval is NOT used."
+ );
+ Svc.PrefBranch.setIntPref("clients.devices.mobile", 2);
+ scheduler.updateClientMode();
+ Assert.equal(scheduler.syncInterval, scheduler.activeInterval);
+
+ scheduler.scheduleNextSync();
+
+ // Ensure we have a small interval.
+ Assert.notEqual(scheduler.nextSync, 0);
+ Assert.equal(scheduler.syncTimer.delay, scheduler.activeInterval);
+
+ // Make interval large again
+ await clientsEngine._wipeClient();
+ Svc.PrefBranch.clearUserPref("clients.devices.mobile");
+ scheduler.updateClientMode();
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+
+ scheduler.scheduleNextSync();
+
+ // Ensure timer delay remains as the small interval.
+ Assert.notEqual(scheduler.nextSync, 0);
+ Assert.ok(scheduler.syncTimer.delay <= scheduler.activeInterval);
+
+ // SyncSchedule.
+ await Service.startOver();
+});
+
+add_task(async function test_adjust_timer_smaller_syncInterval() {
+ _(
+ "Test current timout > syncInterval period && nextSync != 0, syncInterval is used."
+ );
+ scheduler.scheduleNextSync();
+
+ // Ensure we have a large interval.
+ Assert.notEqual(scheduler.nextSync, 0);
+ Assert.equal(scheduler.syncTimer.delay, scheduler.singleDeviceInterval);
+
+ // Make interval smaller
+ Svc.PrefBranch.setIntPref("clients.devices.mobile", 2);
+ scheduler.updateClientMode();
+ Assert.equal(scheduler.syncInterval, scheduler.activeInterval);
+
+ scheduler.scheduleNextSync();
+
+ // Ensure smaller timer delay is used.
+ Assert.notEqual(scheduler.nextSync, 0);
+ Assert.ok(scheduler.syncTimer.delay <= scheduler.activeInterval);
+
+ // SyncSchedule.
+ await Service.startOver();
+});
diff --git a/services/sync/tests/unit/test_keys.js b/services/sync/tests/unit/test_keys.js
new file mode 100644
index 0000000000..8cc5d4055c
--- /dev/null
+++ b/services/sync/tests/unit/test_keys.js
@@ -0,0 +1,242 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Weave } = ChromeUtils.importESModule(
+ "resource://services-sync/main.sys.mjs"
+);
+const { CollectionKeyManager, CryptoWrapper } = ChromeUtils.importESModule(
+ "resource://services-sync/record.sys.mjs"
+);
+
+var collectionKeys = new CollectionKeyManager();
+
+function do_check_keypair_eq(a, b) {
+ Assert.equal(2, a.length);
+ Assert.equal(2, b.length);
+ Assert.equal(a[0], b[0]);
+ Assert.equal(a[1], b[1]);
+}
+
+add_test(function test_set_invalid_values() {
+ _("Ensure that setting invalid encryption and HMAC key values is caught.");
+
+ let bundle = new BulkKeyBundle("foo");
+
+ let thrown = false;
+ try {
+ bundle.encryptionKey = null;
+ } catch (ex) {
+ thrown = true;
+ Assert.equal(ex.message.indexOf("Encryption key can only be set to"), 0);
+ } finally {
+ Assert.ok(thrown);
+ thrown = false;
+ }
+
+ try {
+ bundle.encryptionKey = ["trollololol"];
+ } catch (ex) {
+ thrown = true;
+ Assert.equal(ex.message.indexOf("Encryption key can only be set to"), 0);
+ } finally {
+ Assert.ok(thrown);
+ thrown = false;
+ }
+
+ try {
+    bundle.encryptionKey = Utils.generateRandomBytesLegacy(15);
+  } catch (ex) {
+    thrown = true;
+    Assert.equal(ex.message.indexOf("Encryption key must be at least 128"), 0);
+ } finally {
+ Assert.ok(thrown);
+ thrown = false;
+ }
+
+ try {
+ bundle.hmacKey = null;
+ } catch (ex) {
+ thrown = true;
+ Assert.equal(ex.message.indexOf("HMAC key can only be set to string"), 0);
+ } finally {
+ Assert.ok(thrown);
+ thrown = false;
+ }
+
+ try {
+ bundle.hmacKey = ["trollolol"];
+ } catch (ex) {
+ thrown = true;
+ Assert.equal(ex.message.indexOf("HMAC key can only be set to"), 0);
+ } finally {
+ Assert.ok(thrown);
+ thrown = false;
+ }
+
+ try {
+ bundle.hmacKey = Utils.generateRandomBytesLegacy(15);
+ } catch (ex) {
+ thrown = true;
+ Assert.equal(ex.message.indexOf("HMAC key must be at least 128"), 0);
+ } finally {
+ Assert.ok(thrown);
+ thrown = false;
+ }
+
+ run_next_test();
+});
+
+add_task(async function test_ensureLoggedIn() {
+ let log = Log.repository.getLogger("Test");
+ Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+
+ await configureIdentity();
+
+ let keyBundle = Weave.Service.identity.syncKeyBundle;
+
+ /*
+ * Build a test version of storage/crypto/keys.
+ * Encrypt it with the sync key.
+ * Pass it into the CollectionKeyManager.
+ */
+
+ log.info("Building storage keys...");
+ let storage_keys = new CryptoWrapper("crypto", "keys");
+ let default_key64 = await Weave.Crypto.generateRandomKey();
+ let default_hmac64 = await Weave.Crypto.generateRandomKey();
+ let bookmarks_key64 = await Weave.Crypto.generateRandomKey();
+ let bookmarks_hmac64 = await Weave.Crypto.generateRandomKey();
+
+ storage_keys.cleartext = {
+ default: [default_key64, default_hmac64],
+ collections: { bookmarks: [bookmarks_key64, bookmarks_hmac64] },
+ };
+ storage_keys.modified = Date.now() / 1000;
+ storage_keys.id = "keys";
+
+ log.info("Encrypting storage keys...");
+
+ // Use passphrase (sync key) itself to encrypt the key bundle.
+ await storage_keys.encrypt(keyBundle);
+
+ // Sanity checking.
+ Assert.ok(null == storage_keys.cleartext);
+ Assert.ok(null != storage_keys.ciphertext);
+
+ log.info("Updating collection keys.");
+
+ // updateContents decrypts the object, releasing the payload for us to use.
+ // Returns true, because the default key has changed.
+ Assert.ok(await collectionKeys.updateContents(keyBundle, storage_keys));
+ let payload = storage_keys.cleartext;
+
+ _("CK: " + JSON.stringify(collectionKeys._collections));
+
+ // Test that the CollectionKeyManager returns a similar WBO.
+ let wbo = collectionKeys.asWBO("crypto", "keys");
+
+ _("WBO: " + JSON.stringify(wbo));
+ _("WBO cleartext: " + JSON.stringify(wbo.cleartext));
+
+ // Check the individual contents.
+ Assert.equal(wbo.collection, "crypto");
+ Assert.equal(wbo.id, "keys");
+ Assert.equal(undefined, wbo.modified);
+ Assert.equal(collectionKeys.lastModified, storage_keys.modified);
+ Assert.ok(!!wbo.cleartext.default);
+ do_check_keypair_eq(payload.default, wbo.cleartext.default);
+ do_check_keypair_eq(
+ payload.collections.bookmarks,
+ wbo.cleartext.collections.bookmarks
+ );
+
+ Assert.ok("bookmarks" in collectionKeys._collections);
+ Assert.equal(false, "tabs" in collectionKeys._collections);
+
+ _("Updating contents twice with the same data doesn't proceed.");
+ await storage_keys.encrypt(keyBundle);
+ Assert.equal(
+ false,
+ await collectionKeys.updateContents(keyBundle, storage_keys)
+ );
+
+ /*
+ * Test that we get the right keys out when we ask for
+   * a collection's keys.
+ */
+ let b1 = new BulkKeyBundle("bookmarks");
+ b1.keyPairB64 = [bookmarks_key64, bookmarks_hmac64];
+ let b2 = collectionKeys.keyForCollection("bookmarks");
+ do_check_keypair_eq(b1.keyPair, b2.keyPair);
+
+ // Check key equality.
+ Assert.ok(b1.equals(b2));
+ Assert.ok(b2.equals(b1));
+
+ b1 = new BulkKeyBundle("[default]");
+ b1.keyPairB64 = [default_key64, default_hmac64];
+
+ Assert.ok(!b1.equals(b2));
+ Assert.ok(!b2.equals(b1));
+
+ b2 = collectionKeys.keyForCollection(null);
+ do_check_keypair_eq(b1.keyPair, b2.keyPair);
+
+ /*
+ * Checking for update times.
+ */
+ let info_collections = {};
+ Assert.ok(collectionKeys.updateNeeded(info_collections));
+ info_collections.crypto = 5000;
+ Assert.ok(!collectionKeys.updateNeeded(info_collections));
+ info_collections.crypto = 1 + Date.now() / 1000; // Add one in case computers are fast!
+ Assert.ok(collectionKeys.updateNeeded(info_collections));
+
+ collectionKeys.lastModified = null;
+ Assert.ok(collectionKeys.updateNeeded({}));
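+
+  // Taken together, these assertions show that updateNeeded(info) is true when
+  // we have no local lastModified, when info lacks a crypto timestamp, or when
+  // the server's crypto timestamp is newer than lastModified.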
+
+ /*
+ * Check _compareKeyBundleCollections.
+ */
+ async function newBundle(name) {
+ let r = new BulkKeyBundle(name);
+ await r.generateRandom();
+ return r;
+ }
+ let k1 = await newBundle("k1");
+ let k2 = await newBundle("k2");
+ let k3 = await newBundle("k3");
+ let k4 = await newBundle("k4");
+ let k5 = await newBundle("k5");
+ let coll1 = { foo: k1, bar: k2 };
+ let coll2 = { foo: k1, bar: k2 };
+ let coll3 = { foo: k1, bar: k3 };
+ let coll4 = { foo: k4 };
+ let coll5 = { baz: k5, bar: k2 };
+ let coll6 = {};
+
+ let d1 = collectionKeys._compareKeyBundleCollections(coll1, coll2); // []
+ let d2 = collectionKeys._compareKeyBundleCollections(coll1, coll3); // ["bar"]
+ let d3 = collectionKeys._compareKeyBundleCollections(coll3, coll2); // ["bar"]
+ let d4 = collectionKeys._compareKeyBundleCollections(coll1, coll4); // ["bar", "foo"]
+ let d5 = collectionKeys._compareKeyBundleCollections(coll5, coll2); // ["baz", "foo"]
+ let d6 = collectionKeys._compareKeyBundleCollections(coll6, coll1); // ["bar", "foo"]
+ let d7 = collectionKeys._compareKeyBundleCollections(coll5, coll5); // []
+ let d8 = collectionKeys._compareKeyBundleCollections(coll6, coll6); // []
+
+ Assert.ok(d1.same);
+ Assert.ok(!d2.same);
+ Assert.ok(!d3.same);
+ Assert.ok(!d4.same);
+ Assert.ok(!d5.same);
+ Assert.ok(!d6.same);
+ Assert.ok(d7.same);
+ Assert.ok(d8.same);
+
+ Assert.deepEqual(d1.changed, []);
+ Assert.deepEqual(d2.changed, ["bar"]);
+ Assert.deepEqual(d3.changed, ["bar"]);
+ Assert.deepEqual(d4.changed, ["bar", "foo"]);
+ Assert.deepEqual(d5.changed, ["baz", "foo"]);
+ Assert.deepEqual(d6.changed, ["bar", "foo"]);
+});
diff --git a/services/sync/tests/unit/test_load_modules.js b/services/sync/tests/unit/test_load_modules.js
new file mode 100644
index 0000000000..93ef883d4b
--- /dev/null
+++ b/services/sync/tests/unit/test_load_modules.js
@@ -0,0 +1,59 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { AppConstants } = ChromeUtils.importESModule(
+ "resource://gre/modules/AppConstants.sys.mjs"
+);
+
+const modules = [
+ "addonutils.sys.mjs",
+ "addonsreconciler.sys.mjs",
+ "constants.sys.mjs",
+ "engines/addons.sys.mjs",
+ "engines/clients.sys.mjs",
+ "engines/extension-storage.sys.mjs",
+ "engines/passwords.sys.mjs",
+ "engines/prefs.sys.mjs",
+ "engines.sys.mjs",
+ "keys.sys.mjs",
+ "main.sys.mjs",
+ "policies.sys.mjs",
+ "record.sys.mjs",
+ "resource.sys.mjs",
+ "service.sys.mjs",
+ "stages/declined.sys.mjs",
+ "stages/enginesync.sys.mjs",
+ "status.sys.mjs",
+ "sync_auth.sys.mjs",
+ "util.sys.mjs",
+];
+
+if (AppConstants.MOZ_APP_NAME != "thunderbird") {
+ modules.push(
+ "engines/bookmarks.sys.mjs",
+ "engines/forms.sys.mjs",
+ "engines/history.sys.mjs",
+ "engines/tabs.sys.mjs"
+ );
+}
+
+const testingModules = [
+ "fakeservices.sys.mjs",
+ "rotaryengine.sys.mjs",
+ "utils.sys.mjs",
+ "fxa_utils.sys.mjs",
+];
+
+function run_test() {
+ for (let m of modules) {
+ let res = "resource://services-sync/" + m;
+ _("Attempting to load " + res);
+ ChromeUtils.importESModule(res);
+ }
+
+ for (let m of testingModules) {
+ let res = "resource://testing-common/services/sync/" + m;
+ _("Attempting to load " + res);
+ ChromeUtils.importESModule(res);
+ }
+}
diff --git a/services/sync/tests/unit/test_node_reassignment.js b/services/sync/tests/unit/test_node_reassignment.js
new file mode 100644
index 0000000000..e3352af318
--- /dev/null
+++ b/services/sync/tests/unit/test_node_reassignment.js
@@ -0,0 +1,523 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+_(
+ "Test that node reassignment responses are respected on all kinds of " +
+ "requests."
+);
+
+const { RESTRequest } = ChromeUtils.importESModule(
+ "resource://services-common/rest.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+add_task(async function setup() {
+ validate_all_future_pings();
+});
+
+/**
+ * Emulate the following Zeus config:
+ * $draining = data.get($prefix . $host . " draining");
+ * if ($draining == "drain.") {
+ * log.warn($log_host_db_status . " migrating=1 (node-reassignment)" .
+ * $log_suffix);
+ * http.sendResponse("401 Node reassignment", $content_type,
+ * '"server request: node reassignment"', "");
+ * }
+ */
+const reassignBody = '"server request: node reassignment"';
+
+// API-compatible with SyncServer handler. Bind `handler` to something to use
+// as a ServerCollection handler.
+function handleReassign(handler, req, resp) {
+ resp.setStatusLine(req.httpVersion, 401, "Node reassignment");
+ resp.setHeader("Content-Type", "application/json");
+ resp.bodyOutputStream.write(reassignBody, reassignBody.length);
+}
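+
+// Example use (as the engine tests below do):
+//   rotary.collectionHandler = handleReassign.bind(this, undefined);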
+
+async function prepareServer() {
+ let server = new SyncServer();
+ server.registerUser("johndoe");
+ server.start();
+ syncTestLogging();
+ await configureIdentity({ username: "johndoe" }, server);
+ return server;
+}
+
+function getReassigned() {
+ try {
+ return Services.prefs.getBoolPref("services.sync.lastSyncReassigned");
+ } catch (ex) {
+ if (ex.result != Cr.NS_ERROR_UNEXPECTED) {
+ do_throw(
+ "Got exception retrieving lastSyncReassigned: " + Log.exceptionStr(ex)
+ );
+ }
+ }
+ return false;
+}
+
+/**
+ * Make a test request to `url`, then watch the result of two syncs
+ * to ensure that a node reassignment request was made.
+ * Runs `between` between the two syncs. This can be used to undo deliberate
+ * failure setup, detach observers, etc.
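+ *
+ * Rough flow (a sketch of the code below): GET `url` expecting a 401, then
+ * sync (`firstNotification` fires), run `between()`, sync again
+ * (`secondNotification` fires), and finally assert exactly one token fetch
+ * before tearing down.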
+ */
+async function syncAndExpectNodeReassignment(
+ server,
+ firstNotification,
+ between,
+ secondNotification,
+ url
+) {
+ let deferred = Promise.withResolvers();
+
+ let getTokenCount = 0;
+ let mockTSC = {
+ // TokenServerClient
+ async getTokenUsingOAuth() {
+ getTokenCount++;
+ return { endpoint: server.baseURI + "1.1/johndoe/" };
+ },
+ };
+ Service.identity._tokenServerClient = mockTSC;
+
+ // Make sure that it works!
+ let request = new RESTRequest(url);
+ let response = await request.get();
+ Assert.equal(response.status, 401);
+
+ function onFirstSync() {
+ _("First sync completed.");
+ Svc.Obs.remove(firstNotification, onFirstSync);
+ Svc.Obs.add(secondNotification, onSecondSync);
+
+ Assert.equal(Service.clusterURL, "");
+
+ // Allow for tests to clean up error conditions.
+ between();
+ }
+ function onSecondSync() {
+ _("Second sync completed.");
+ Svc.Obs.remove(secondNotification, onSecondSync);
+ Service.scheduler.clearSyncTriggers();
+
+ // Make absolutely sure that any event listeners are done with their work
+ // before we proceed.
+ waitForZeroTimer(function () {
+ _("Second sync nextTick.");
+ Assert.equal(getTokenCount, 1);
+ Service.startOver().then(() => {
+ server.stop(deferred.resolve);
+ });
+ });
+ }
+
+ Svc.Obs.add(firstNotification, onFirstSync);
+ await Service.sync();
+
+ await deferred.promise;
+}
+
+add_task(async function test_momentary_401_engine() {
+ enableValidationPrefs();
+
+ _("Test a failure for engine URLs that's resolved by reassignment.");
+ let server = await prepareServer();
+ let john = server.user("johndoe");
+
+ _("Enabling the Rotary engine.");
+ let { engine, syncID, tracker } = await registerRotaryEngine();
+
+ // We need the server to be correctly set up prior to experimenting. Do this
+ // through a sync.
+ let global = {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ rotary: { version: engine.version, syncID },
+ };
+ john.createCollection("meta").insert("global", global);
+
+ _("First sync to prepare server contents.");
+ await Service.sync();
+
+ let numResets = 0;
+ let observeReset = (obs, topic) => {
+ if (topic == "rotary") {
+ numResets += 1;
+ }
+ };
+ _("Adding observer that we saw an engine reset.");
+ Svc.Obs.add("weave:engine:reset-client:finish", observeReset);
+
+ _("Setting up Rotary collection to 401.");
+ let rotary = john.createCollection("rotary");
+ let oldHandler = rotary.collectionHandler;
+ rotary.collectionHandler = handleReassign.bind(this, undefined);
+
+ // We want to verify that the clusterURL pref has been cleared after a 401
+ // inside a sync. Flag the Rotary engine to need syncing.
+ john.collection("rotary").timestamp += 1000;
+
+ function between() {
+ _("Undoing test changes.");
+ rotary.collectionHandler = oldHandler;
+
+ function onLoginStart() {
+ // lastSyncReassigned shouldn't be cleared until a sync has succeeded.
+ _("Ensuring that lastSyncReassigned is still set at next sync start.");
+ Svc.Obs.remove("weave:service:login:start", onLoginStart);
+ Assert.ok(getReassigned());
+ }
+
+ _("Adding observer that lastSyncReassigned is still set on login.");
+ Svc.Obs.add("weave:service:login:start", onLoginStart);
+ }
+
+ await syncAndExpectNodeReassignment(
+ server,
+ "weave:service:sync:finish",
+ between,
+ "weave:service:sync:finish",
+ Service.storageURL + "rotary"
+ );
+
+ Svc.Obs.remove("weave:engine:reset-client:finish", observeReset);
+ Assert.equal(numResets, 1);
+ await tracker.clearChangedIDs();
+ await Service.engineManager.unregister(engine);
+});
+
+// This test ends up being a failing fetch *after we're already logged in*.
+add_task(async function test_momentary_401_info_collections() {
+ enableValidationPrefs();
+
+ _("Test a failure for info/collections that's resolved by reassignment.");
+ let server = await prepareServer();
+
+ _("First sync to prepare server contents.");
+ await Service.sync();
+
+ // Return a 401 for info requests, particularly info/collections.
+ let oldHandler = server.toplevelHandlers.info;
+ server.toplevelHandlers.info = handleReassign;
+
+ function undo() {
+ _("Undoing test changes.");
+ server.toplevelHandlers.info = oldHandler;
+ }
+
+ await syncAndExpectNodeReassignment(
+ server,
+ "weave:service:sync:error",
+ undo,
+ "weave:service:sync:finish",
+ Service.infoURL
+ );
+});
+
+add_task(async function test_momentary_401_storage_loggedin() {
+ enableValidationPrefs();
+
+ _(
+ "Test a failure for any storage URL, not just engine parts. " +
+ "Resolved by reassignment."
+ );
+ let server = await prepareServer();
+
+ _("Performing initial sync to ensure we are logged in.");
+ await Service.sync();
+
+ // Return a 401 for all storage requests.
+ let oldHandler = server.toplevelHandlers.storage;
+ server.toplevelHandlers.storage = handleReassign;
+
+ function undo() {
+ _("Undoing test changes.");
+ server.toplevelHandlers.storage = oldHandler;
+ }
+
+ Assert.ok(Service.isLoggedIn, "already logged in");
+ await syncAndExpectNodeReassignment(
+ server,
+ "weave:service:sync:error",
+ undo,
+ "weave:service:sync:finish",
+ Service.storageURL + "meta/global"
+ );
+});
+
+add_task(async function test_momentary_401_storage_loggedout() {
+ enableValidationPrefs();
+
+ _(
+ "Test a failure for any storage URL, not just engine parts. " +
+ "Resolved by reassignment."
+ );
+ let server = await prepareServer();
+
+ // Return a 401 for all storage requests.
+ let oldHandler = server.toplevelHandlers.storage;
+ server.toplevelHandlers.storage = handleReassign;
+
+ function undo() {
+ _("Undoing test changes.");
+ server.toplevelHandlers.storage = oldHandler;
+ }
+
+ Assert.ok(!Service.isLoggedIn, "not already logged in");
+ await syncAndExpectNodeReassignment(
+ server,
+ "weave:service:login:error",
+ undo,
+ "weave:service:sync:finish",
+ Service.storageURL + "meta/global"
+ );
+});
+
+add_task(async function test_loop_avoidance_storage() {
+ enableValidationPrefs();
+
+ _(
+ "Test that a repeated failure doesn't result in a sync loop " +
+ "if node reassignment cannot resolve the failure."
+ );
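+
+  // The loop-avoidance rule under test: after a second consecutive 401, the
+  // scheduler must push the next sync out by at least MINIMUM_BACKOFF_INTERVAL
+  // rather than retrying immediately, since reassignment didn't fix anything.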
+
+ let server = await prepareServer();
+
+ // Return a 401 for all storage requests.
+ let oldHandler = server.toplevelHandlers.storage;
+ server.toplevelHandlers.storage = handleReassign;
+
+ let firstNotification = "weave:service:login:error";
+ let secondNotification = "weave:service:login:error";
+ let thirdNotification = "weave:service:sync:finish";
+
+ let deferred = Promise.withResolvers();
+
+ let getTokenCount = 0;
+ let mockTSC = {
+ // TokenServerClient
+ async getTokenUsingOAuth() {
+ getTokenCount++;
+ return { endpoint: server.baseURI + "1.1/johndoe/" };
+ },
+ };
+ Service.identity._tokenServerClient = mockTSC;
+
+  // Track the time. We want to make sure the duration between the first and
+  // second sync is small, and that the duration between the second and third
+  // syncs is large.
+ let now;
+
+ function onFirstSync() {
+ _("First sync completed.");
+ Svc.Obs.remove(firstNotification, onFirstSync);
+ Svc.Obs.add(secondNotification, onSecondSync);
+
+ Assert.equal(Service.clusterURL, "");
+
+ // We got a 401 mid-sync, and set the pref accordingly.
+ Assert.ok(Services.prefs.getBoolPref("services.sync.lastSyncReassigned"));
+
+ // Update the timestamp.
+ now = Date.now();
+ }
+
+ function onSecondSync() {
+ _("Second sync completed.");
+ Svc.Obs.remove(secondNotification, onSecondSync);
+ Svc.Obs.add(thirdNotification, onThirdSync);
+
+ // This sync occurred within the backoff interval.
+ let elapsedTime = Date.now() - now;
+ Assert.ok(elapsedTime < MINIMUM_BACKOFF_INTERVAL);
+
+ // This pref will be true until a sync completes successfully.
+ Assert.ok(getReassigned());
+
+ // The timer will be set for some distant time.
+ // We store nextSync in prefs, which offers us only limited resolution.
+ // Include that logic here.
+ let expectedNextSync =
+ 1000 * Math.floor((now + MINIMUM_BACKOFF_INTERVAL) / 1000);
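+    // Worked example with made-up numbers (the real MINIMUM_BACKOFF_INTERVAL
+    // lives in constants.sys.mjs): now = 1234567890123, backoff = 300000 gives
+    // 1000 * Math.floor(1234568190123 / 1000) == 1234568190000, i.e. the
+    // target time truncated to whole seconds, hence the ">=" check below.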
+ _("Next sync scheduled for " + Service.scheduler.nextSync);
+ _("Expected to be slightly greater than " + expectedNextSync);
+
+ Assert.ok(Service.scheduler.nextSync >= expectedNextSync);
+ Assert.ok(!!Service.scheduler.syncTimer);
+
+ // Undo our evil scheme.
+ server.toplevelHandlers.storage = oldHandler;
+
+ // Bring the timer forward to kick off a successful sync, so we can watch
+ // the pref get cleared.
+ Service.scheduler.scheduleNextSync(0);
+ }
+ function onThirdSync() {
+ Svc.Obs.remove(thirdNotification, onThirdSync);
+
+ // That'll do for now; no more syncs.
+ Service.scheduler.clearSyncTriggers();
+
+ // Make absolutely sure that any event listeners are done with their work
+ // before we proceed.
+ waitForZeroTimer(function () {
+ _("Third sync nextTick.");
+ Assert.ok(!getReassigned());
+ Assert.equal(getTokenCount, 2);
+ Service.startOver().then(() => {
+ server.stop(deferred.resolve);
+ });
+ });
+ }
+
+ Svc.Obs.add(firstNotification, onFirstSync);
+
+ now = Date.now();
+ await Service.sync();
+ await deferred.promise;
+});
+
+add_task(async function test_loop_avoidance_engine() {
+ enableValidationPrefs();
+
+ _(
+ "Test that a repeated 401 in an engine doesn't result in a sync loop " +
+ "if node reassignment cannot resolve the failure."
+ );
+ let server = await prepareServer();
+ let john = server.user("johndoe");
+
+ _("Enabling the Rotary engine.");
+ let { engine, syncID, tracker } = await registerRotaryEngine();
+ let deferred = Promise.withResolvers();
+
+ let getTokenCount = 0;
+ let mockTSC = {
+ // TokenServerClient
+ async getTokenUsingOAuth() {
+ getTokenCount++;
+ return { endpoint: server.baseURI + "1.1/johndoe/" };
+ },
+ };
+ Service.identity._tokenServerClient = mockTSC;
+
+ // We need the server to be correctly set up prior to experimenting. Do this
+ // through a sync.
+ let global = {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ rotary: { version: engine.version, syncID },
+ };
+ john.createCollection("meta").insert("global", global);
+
+ _("First sync to prepare server contents.");
+ await Service.sync();
+
+ _("Setting up Rotary collection to 401.");
+ let rotary = john.createCollection("rotary");
+ let oldHandler = rotary.collectionHandler;
+ rotary.collectionHandler = handleReassign.bind(this, undefined);
+
+ // Flag the Rotary engine to need syncing.
+ john.collection("rotary").timestamp += 1000;
+
+ function onLoginStart() {
+ // lastSyncReassigned shouldn't be cleared until a sync has succeeded.
+ _("Ensuring that lastSyncReassigned is still set at next sync start.");
+ Assert.ok(getReassigned());
+ }
+
+ function beforeSuccessfulSync() {
+ _("Undoing test changes.");
+ rotary.collectionHandler = oldHandler;
+ }
+
+ let firstNotification = "weave:service:sync:finish";
+ let secondNotification = "weave:service:sync:finish";
+ let thirdNotification = "weave:service:sync:finish";
+
+  // Track the time. We want to make sure the duration between the first and
+  // second sync is small, and that the duration between the second and third
+  // syncs is large.
+ let now;
+
+ function onFirstSync() {
+ _("First sync completed.");
+ Svc.Obs.remove(firstNotification, onFirstSync);
+ Svc.Obs.add(secondNotification, onSecondSync);
+
+ Assert.equal(Service.clusterURL, "");
+
+ _("Adding observer that lastSyncReassigned is still set on login.");
+ Svc.Obs.add("weave:service:login:start", onLoginStart);
+
+ // We got a 401 mid-sync, and set the pref accordingly.
+ Assert.ok(Services.prefs.getBoolPref("services.sync.lastSyncReassigned"));
+
+ // Update the timestamp.
+ now = Date.now();
+ }
+
+ function onSecondSync() {
+ _("Second sync completed.");
+ Svc.Obs.remove(secondNotification, onSecondSync);
+ Svc.Obs.add(thirdNotification, onThirdSync);
+
+ // This sync occurred within the backoff interval.
+ let elapsedTime = Date.now() - now;
+ Assert.ok(elapsedTime < MINIMUM_BACKOFF_INTERVAL);
+
+ // This pref will be true until a sync completes successfully.
+ Assert.ok(getReassigned());
+
+ // The timer will be set for some distant time.
+ // We store nextSync in prefs, which offers us only limited resolution.
+ // Include that logic here.
+ let expectedNextSync =
+ 1000 * Math.floor((now + MINIMUM_BACKOFF_INTERVAL) / 1000);
+ _("Next sync scheduled for " + Service.scheduler.nextSync);
+ _("Expected to be slightly greater than " + expectedNextSync);
+
+ Assert.ok(Service.scheduler.nextSync >= expectedNextSync);
+ Assert.ok(!!Service.scheduler.syncTimer);
+
+ // Undo our evil scheme.
+ beforeSuccessfulSync();
+
+ // Bring the timer forward to kick off a successful sync, so we can watch
+ // the pref get cleared.
+ Service.scheduler.scheduleNextSync(0);
+ }
+
+ function onThirdSync() {
+ Svc.Obs.remove(thirdNotification, onThirdSync);
+
+ // That'll do for now; no more syncs.
+ Service.scheduler.clearSyncTriggers();
+
+ // Make absolutely sure that any event listeners are done with their work
+ // before we proceed.
+ waitForZeroTimer(function () {
+ _("Third sync nextTick.");
+ Assert.ok(!getReassigned());
+ Assert.equal(getTokenCount, 2);
+ Svc.Obs.remove("weave:service:login:start", onLoginStart);
+ Service.startOver().then(() => {
+ server.stop(deferred.resolve);
+ });
+ });
+ }
+
+ Svc.Obs.add(firstNotification, onFirstSync);
+
+ now = Date.now();
+ await Service.sync();
+ await deferred.promise;
+
+ await tracker.clearChangedIDs();
+ await Service.engineManager.unregister(engine);
+});
diff --git a/services/sync/tests/unit/test_password_engine.js b/services/sync/tests/unit/test_password_engine.js
new file mode 100644
index 0000000000..081403f63d
--- /dev/null
+++ b/services/sync/tests/unit/test_password_engine.js
@@ -0,0 +1,1257 @@
+const { FXA_PWDMGR_HOST, FXA_PWDMGR_REALM } = ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccountsCommon.sys.mjs"
+);
+const { LoginRec } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/passwords.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+const LoginInfo = Components.Constructor(
+ "@mozilla.org/login-manager/loginInfo;1",
+ Ci.nsILoginInfo,
+ "init"
+);
+
+const { LoginCSVImport } = ChromeUtils.importESModule(
+ "resource://gre/modules/LoginCSVImport.sys.mjs"
+);
+
+const { FileTestUtils } = ChromeUtils.importESModule(
+ "resource://testing-common/FileTestUtils.sys.mjs"
+);
+
+const PropertyBag = Components.Constructor(
+ "@mozilla.org/hash-property-bag;1",
+ Ci.nsIWritablePropertyBag
+);
+
+async function cleanup(engine, server) {
+ await engine._tracker.stop();
+ await engine.wipeClient();
+ engine.lastModified = null;
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ Service.recordManager.clearCache();
+ if (server) {
+ await promiseStopServer(server);
+ }
+}
+
+add_task(async function setup() {
+ // Disable addon sync because AddonManager won't be initialized here.
+ await Service.engineManager.unregister("addons");
+ await Service.engineManager.unregister("extension-storage");
+});
+
+add_task(async function test_ignored_fields() {
+ _("Only changes to syncable fields should be tracked");
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ enableValidationPrefs();
+
+ let loginInfo = new LoginInfo(
+ "https://example.com",
+ "",
+ null,
+ "username",
+ "password",
+ "",
+ ""
+ );
+
+ // Setting syncCounter to -1 so that it will be incremented to 0 when added.
+ loginInfo.syncCounter = -1;
+ let login = await Services.logins.addLoginAsync(loginInfo);
+ login.QueryInterface(Ci.nsILoginMetaInfo); // For `guid`.
+
+ engine._tracker.start();
+
+ try {
+ let nonSyncableProps = new PropertyBag();
+ nonSyncableProps.setProperty("timeLastUsed", Date.now());
+ nonSyncableProps.setProperty("timesUsed", 3);
+ Services.logins.modifyLogin(login, nonSyncableProps);
+
+ let noChanges = await engine.pullNewChanges();
+ deepEqual(noChanges, {}, "Should not track non-syncable fields");
+
+ let syncableProps = new PropertyBag();
+ syncableProps.setProperty("username", "newuser");
+ Services.logins.modifyLogin(login, syncableProps);
+
+ let changes = await engine.pullNewChanges();
+ deepEqual(
+ Object.keys(changes),
+ [login.guid],
+ "Should track syncable fields"
+ );
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_ignored_sync_credentials() {
+ _("Sync credentials in login manager should be ignored");
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ enableValidationPrefs();
+
+ engine._tracker.start();
+
+ try {
+ let login = await Services.logins.addLoginAsync(
+ new LoginInfo(
+ FXA_PWDMGR_HOST,
+ null,
+ FXA_PWDMGR_REALM,
+ "fxa-uid",
+ "creds",
+ "",
+ ""
+ )
+ );
+
+ let noChanges = await engine.pullNewChanges();
+ deepEqual(noChanges, {}, "Should not track new FxA credentials");
+
+ let props = new PropertyBag();
+ props.setProperty("password", "newcreds");
+ Services.logins.modifyLogin(login, props);
+
+ noChanges = await engine.pullNewChanges();
+ deepEqual(noChanges, {}, "Should not track changes to FxA credentials");
+
+ let foundLogins = await Services.logins.searchLoginsAsync({
+ origin: FXA_PWDMGR_HOST,
+ });
+ equal(foundLogins.length, 1);
+ equal(foundLogins[0].syncCounter, 0);
+ equal(foundLogins[0].everSynced, false);
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_password_engine() {
+ _("Basic password sync test");
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ let collection = server.user("foo").collection("passwords");
+
+ enableValidationPrefs();
+
+ _("Add new login to upload during first sync");
+ let newLogin;
+ {
+ let login = new LoginInfo(
+ "https://example.com",
+ "",
+ null,
+ "username",
+ "password",
+ "",
+ ""
+ );
+ await Services.logins.addLoginAsync(login);
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "https://example.com",
+ });
+ equal(logins.length, 1, "Should find new login in login manager");
+ newLogin = logins[0].QueryInterface(Ci.nsILoginMetaInfo);
+
+ // Insert a server record that's older, so that we prefer the local one.
+ let rec = new LoginRec("passwords", newLogin.guid);
+ rec.formSubmitURL = newLogin.formActionOrigin;
+ rec.httpRealm = newLogin.httpRealm;
+ rec.hostname = newLogin.origin;
+ rec.username = newLogin.username;
+ rec.password = "sekrit";
+ let remotePasswordChangeTime = Date.now() - 1 * 60 * 60 * 24 * 1000;
+ rec.timeCreated = remotePasswordChangeTime;
+ rec.timePasswordChanged = remotePasswordChangeTime;
+ collection.insert(
+ newLogin.guid,
+ encryptPayload(rec.cleartext),
+ remotePasswordChangeTime / 1000
+ );
+ }
+
+ _("Add login with older password change time to replace during first sync");
+ let oldLogin;
+ {
+ let login = new LoginInfo(
+ "https://mozilla.com",
+ "",
+ null,
+ "us3r",
+ "0ldpa55",
+ "",
+ ""
+ );
+ await Services.logins.addLoginAsync(login);
+
+ let props = new PropertyBag();
+ let localPasswordChangeTime = Date.now() - 1 * 60 * 60 * 24 * 1000;
+ props.setProperty("timePasswordChanged", localPasswordChangeTime);
+ Services.logins.modifyLogin(login, props);
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "https://mozilla.com",
+ });
+ equal(logins.length, 1, "Should find old login in login manager");
+ oldLogin = logins[0].QueryInterface(Ci.nsILoginMetaInfo);
+ equal(oldLogin.timePasswordChanged, localPasswordChangeTime);
+
+ let rec = new LoginRec("passwords", oldLogin.guid);
+ rec.hostname = oldLogin.origin;
+ rec.formSubmitURL = oldLogin.formActionOrigin;
+ rec.httpRealm = oldLogin.httpRealm;
+ rec.username = oldLogin.username;
+ // Change the password and bump the password change time to ensure we prefer
+ // the remote one during reconciliation.
+ rec.password = "n3wpa55";
+ rec.usernameField = oldLogin.usernameField;
+    rec.passwordField = oldLogin.passwordField;
+ rec.timeCreated = oldLogin.timeCreated;
+ rec.timePasswordChanged = Date.now();
+ collection.insert(oldLogin.guid, encryptPayload(rec.cleartext));
+ }
+
+ await engine._tracker.stop();
+
+ try {
+ await sync_engine_and_validate_telem(engine, false);
+
+ let newRec = collection.cleartext(newLogin.guid);
+ equal(
+ newRec.password,
+ "password",
+ "Should update remote password for newer login"
+ );
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "https://mozilla.com",
+ });
+ equal(
+ logins[0].password,
+ "n3wpa55",
+ "Should update local password for older login"
+ );
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_sync_outgoing() {
+ _("Test syncing outgoing records");
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.user("foo").collection("passwords");
+
+ let loginInfo = new LoginInfo(
+ "http://mozilla.com",
+ "http://mozilla.com",
+ null,
+ "theuser",
+ "thepassword",
+ "username",
+ "password"
+ );
+ let login = await Services.logins.addLoginAsync(loginInfo);
+
+ engine._tracker.start();
+
+ try {
+ let foundLogins = await Services.logins.searchLoginsAsync({
+ origin: "http://mozilla.com",
+ });
+ equal(foundLogins.length, 1);
+ equal(foundLogins[0].syncCounter, 1);
+ equal(foundLogins[0].everSynced, false);
+ equal(collection.count(), 0);
+
+ let guid = foundLogins[0].QueryInterface(Ci.nsILoginMetaInfo).guid;
+
+ let changes = await engine.getChangedIDs();
+ let change = changes[guid];
+ equal(Object.keys(changes).length, 1);
+ equal(change.counter, 1);
+ ok(!change.deleted);
+
+    // This test modifies the password, performs a sync, and then ensures that
+    // the synced record is correct. This is done twice to ensure that syncing
+    // works both when the server record does not yet exist and when it
+    // already exists.
+ for (let i = 1; i <= 2; i++) {
+ _("Modify the password iteration " + i);
+ foundLogins[0].password = "newpassword" + i;
+ Services.logins.modifyLogin(login, foundLogins[0]);
+ foundLogins = await Services.logins.searchLoginsAsync({
+ origin: "http://mozilla.com",
+ });
+ equal(foundLogins.length, 1);
+ // On the first pass, the counter should be 2, one for the add and one for the modify.
+ // No sync has occurred yet so everSynced should be false.
+ // On the second pass, the counter will only be 1 for the modify. The everSynced
+ // property should be true as the sync happened on the last iteration.
+ equal(foundLogins[0].syncCounter, i == 2 ? 1 : 2);
+ equal(foundLogins[0].everSynced, i == 2);
+
+ changes = await engine.getChangedIDs();
+ change = changes[guid];
+ equal(Object.keys(changes).length, 1);
+ equal(change.counter, i == 2 ? 1 : 2);
+ ok(!change.deleted);
+
+ _("Perform sync after modifying the password");
+ await sync_engine_and_validate_telem(engine, false);
+
+      equal(Object.keys(await engine.getChangedIDs()).length, 0);
+
+ // The remote login should have the updated password.
+ let newRec = collection.cleartext(guid);
+ equal(
+ newRec.password,
+ "newpassword" + i,
+ "Should update remote password for login"
+ );
+
+ foundLogins = await Services.logins.searchLoginsAsync({
+ origin: "http://mozilla.com",
+ });
+ equal(foundLogins.length, 1);
+ equal(foundLogins[0].syncCounter, 0);
+ equal(foundLogins[0].everSynced, true);
+
+ login.password = "newpassword" + i;
+ }
+
+ // Next, modify the username and sync.
+ _("Modify the username");
+ foundLogins[0].username = "newuser";
+ Services.logins.modifyLogin(login, foundLogins[0]);
+ foundLogins = await Services.logins.searchLoginsAsync({
+ origin: "http://mozilla.com",
+ });
+ equal(foundLogins.length, 1);
+ equal(foundLogins[0].syncCounter, 1);
+ equal(foundLogins[0].everSynced, true);
+
+ _("Perform sync after modifying the username");
+ await sync_engine_and_validate_telem(engine, false);
+
+ // The remote login should have the updated password.
+ let newRec = collection.cleartext(guid);
+ equal(
+ newRec.username,
+ "newuser",
+ "Should update remote username for login"
+ );
+
+ foundLogins = await Services.logins.searchLoginsAsync({
+ origin: "http://mozilla.com",
+ });
+ equal(foundLogins.length, 1);
+ equal(foundLogins[0].syncCounter, 0);
+ equal(foundLogins[0].everSynced, true);
+
+ // Finally, remove the login. The server record should be marked as deleted.
+ _("Remove the login");
+ equal(collection.count(), 1);
+ equal(Services.logins.countLogins("", "", ""), 2);
+ equal((await Services.logins.getAllLogins()).length, 2);
+ ok(await engine._store.itemExists(guid));
+
+ ok((await engine._store.getAllIDs())[guid]);
+
+ Services.logins.removeLogin(foundLogins[0]);
+ foundLogins = await Services.logins.searchLoginsAsync({
+ origin: "http://mozilla.com",
+ });
+ equal(foundLogins.length, 0);
+
+ changes = await engine.getChangedIDs();
+ change = changes[guid];
+ equal(Object.keys(changes).length, 1);
+ equal(change.counter, 1);
+ ok(change.deleted);
+
+ _("Perform sync after removing the login");
+ await sync_engine_and_validate_telem(engine, false);
+
+ equal(collection.count(), 1);
+ let payload = collection.payloads()[0];
+ ok(payload.deleted);
+
+    equal(Object.keys(await engine.getChangedIDs()).length, 0);
+
+ // All of these should not include the deleted login. Only the FxA password should exist.
+ equal(Services.logins.countLogins("", "", ""), 1);
+ equal((await Services.logins.getAllLogins()).length, 1);
+ ok(!(await engine._store.itemExists(guid)));
+
+ // getAllIDs includes deleted items but skips the FxA login.
+ ok((await engine._store.getAllIDs())[guid]);
+ let deletedLogin = await engine._store._getLoginFromGUID(guid);
+
+ equal(deletedLogin.hostname, null, "deleted login hostname");
+ equal(
+ deletedLogin.formActionOrigin,
+ null,
+ "deleted login formActionOrigin"
+ );
+ equal(deletedLogin.formSubmitURL, null, "deleted login formSubmitURL");
+ equal(deletedLogin.httpRealm, null, "deleted login httpRealm");
+ equal(deletedLogin.username, null, "deleted login username");
+ equal(deletedLogin.password, null, "deleted login password");
+ equal(deletedLogin.usernameField, "", "deleted login usernameField");
+ equal(deletedLogin.passwordField, "", "deleted login passwordField");
+ equal(deletedLogin.unknownFields, null, "deleted login unknownFields");
+ equal(deletedLogin.timeCreated, 0, "deleted login timeCreated");
+ equal(deletedLogin.timeLastUsed, 0, "deleted login timeLastUsed");
+ equal(deletedLogin.timesUsed, 0, "deleted login timesUsed");
+
+ // These fields are not reset when the login is removed.
+ equal(deletedLogin.guid, guid, "deleted login guid");
+ equal(deletedLogin.everSynced, true, "deleted login everSynced");
+ equal(deletedLogin.syncCounter, 0, "deleted login syncCounter");
+ ok(
+ deletedLogin.timePasswordChanged > 0,
+ "deleted login timePasswordChanged"
+ );
+ } finally {
+ await engine._tracker.stop();
+
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_sync_incoming() {
+ _("Test syncing incoming records");
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.user("foo").collection("passwords");
+
+ const checkFields = [
+ "formSubmitURL",
+ "hostname",
+ "httpRealm",
+ "username",
+ "password",
+ "usernameField",
+ "passwordField",
+ "timeCreated",
+ ];
+
+ let guid1 = Utils.makeGUID();
+ let details = {
+ formSubmitURL: "https://www.example.com",
+ hostname: "https://www.example.com",
+ httpRealm: null,
+ username: "camel",
+ password: "llama",
+ usernameField: "username-field",
+ passwordField: "password-field",
+ timeCreated: Date.now(),
+ timePasswordChanged: Date.now(),
+ };
+
+ try {
+ // This test creates a remote server record and then verifies that the login
+ // has been added locally after the sync occurs.
+ _("Create remote login");
+ collection.insertRecord(Object.assign({}, details, { id: guid1 }));
+
+ _("Perform sync when remote login has been added");
+ await sync_engine_and_validate_telem(engine, false);
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "https://www.example.com",
+ });
+ equal(logins.length, 1);
+
+ equal(logins[0].QueryInterface(Ci.nsILoginMetaInfo).guid, guid1);
+ checkFields.forEach(field => {
+ equal(logins[0][field], details[field]);
+ });
+ equal(logins[0].timePasswordChanged, details.timePasswordChanged);
+ equal(logins[0].syncCounter, 0);
+ equal(logins[0].everSynced, true);
+
+ // Modify the password within the remote record and then sync again.
+ _("Perform sync when remote login's password has been modified");
+ let newTime = Date.now();
+ collection.updateRecord(
+ guid1,
+ cleartext => {
+ cleartext.password = "alpaca";
+ },
+ newTime / 1000 + 10
+ );
+
+ await engine.setLastSync(newTime / 1000 - 30);
+ await sync_engine_and_validate_telem(engine, false);
+
+ logins = await Services.logins.searchLoginsAsync({
+ origin: "https://www.example.com",
+ });
+ equal(logins.length, 1);
+
+ details.password = "alpaca";
+ equal(logins[0].QueryInterface(Ci.nsILoginMetaInfo).guid, guid1);
+ checkFields.forEach(field => {
+ equal(logins[0][field], details[field]);
+ });
+ ok(logins[0].timePasswordChanged > details.timePasswordChanged);
+ equal(logins[0].syncCounter, 0);
+ equal(logins[0].everSynced, true);
+
+ // Modify the username within the remote record and then sync again.
+ _("Perform sync when remote login's username has been modified");
+ newTime = Date.now();
+ collection.updateRecord(
+ guid1,
+ cleartext => {
+ cleartext.username = "guanaco";
+ },
+ newTime / 1000 + 10
+ );
+
+ await engine.setLastSync(newTime / 1000 - 30);
+ await sync_engine_and_validate_telem(engine, false);
+
+ logins = await Services.logins.searchLoginsAsync({
+ origin: "https://www.example.com",
+ });
+ equal(logins.length, 1);
+
+ details.username = "guanaco";
+ equal(logins[0].QueryInterface(Ci.nsILoginMetaInfo).guid, guid1);
+ checkFields.forEach(field => {
+ equal(logins[0][field], details[field]);
+ });
+ ok(logins[0].timePasswordChanged > details.timePasswordChanged);
+ equal(logins[0].syncCounter, 0);
+ equal(logins[0].everSynced, true);
+
+ // Mark the remote record as deleted and then sync again.
+ _("Perform sync when remote login has been marked for deletion");
+ newTime = Date.now();
+ collection.updateRecord(
+ guid1,
+ cleartext => {
+ cleartext.deleted = true;
+ },
+ newTime / 1000 + 10
+ );
+
+ await engine.setLastSync(newTime / 1000 - 30);
+ await sync_engine_and_validate_telem(engine, false);
+
+ logins = await Services.logins.searchLoginsAsync({
+ origin: "https://www.example.com",
+ });
+ equal(logins.length, 0);
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_sync_incoming_deleted() {
+ _("Test syncing incoming deleted records");
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.user("foo").collection("passwords");
+
+ let guid1 = Utils.makeGUID();
+ let details2 = {
+ formSubmitURL: "https://www.example.org",
+ hostname: "https://www.example.org",
+ httpRealm: null,
+ username: "capybara",
+ password: "beaver",
+ usernameField: "username-field",
+ passwordField: "password-field",
+ timeCreated: Date.now(),
+ timePasswordChanged: Date.now(),
+ deleted: true,
+ };
+
+ try {
+ // This test creates a remote server record that has been deleted
+ // and then verifies that the login is not imported locally.
+ _("Create remote login");
+ collection.insertRecord(Object.assign({}, details2, { id: guid1 }));
+
+ _("Perform sync when remote login has been deleted");
+ await sync_engine_and_validate_telem(engine, false);
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "https://www.example.com",
+ });
+ equal(logins.length, 0);
+ ok(!(await engine._store.getAllIDs())[guid1]);
+ ok(!(await engine._store.itemExists(guid1)));
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_sync_incoming_deleted_localchanged_remotenewer() {
+ _(
+ "Test syncing incoming deleted records where the local login has been changed but the remote record is newer"
+ );
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.user("foo").collection("passwords");
+
+ let loginInfo = new LoginInfo(
+ "http://mozilla.com",
+ "http://mozilla.com",
+ null,
+ "kangaroo",
+ "kaola",
+ "username",
+ "password"
+ );
+ let login = await Services.logins.addLoginAsync(loginInfo);
+ let guid = login.QueryInterface(Ci.nsILoginMetaInfo).guid;
+
+ try {
+ _("Perform sync on new login");
+ await sync_engine_and_validate_telem(engine, false);
+
+ let foundLogins = await Services.logins.searchLoginsAsync({
+ origin: "http://mozilla.com",
+ });
+ foundLogins[0].password = "wallaby";
+ Services.logins.modifyLogin(login, foundLogins[0]);
+
+ // Use a time in the future to ensure that the remote record is newer.
+ collection.updateRecord(
+ guid,
+ cleartext => {
+ cleartext.deleted = true;
+ },
+ Date.now() / 1000 + 1000
+ );
+
+ _(
+ "Perform sync when remote login has been deleted and local login has been changed"
+ );
+ await sync_engine_and_validate_telem(engine, false);
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "https://mozilla.com",
+ });
+ equal(logins.length, 0);
+ ok(await engine._store.getAllIDs());
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_sync_incoming_deleted_localchanged_localnewer() {
+ _(
+ "Test syncing incoming deleted records where the local login has been changed but the local record is newer"
+ );
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.user("foo").collection("passwords");
+
+ let loginInfo = new LoginInfo(
+ "http://www.mozilla.com",
+ "http://www.mozilla.com",
+ null,
+ "lion",
+ "tiger",
+ "username",
+ "password"
+ );
+ let login = await Services.logins.addLoginAsync(loginInfo);
+ let guid = login.QueryInterface(Ci.nsILoginMetaInfo).guid;
+
+ try {
+ _("Perform sync on new login");
+ await sync_engine_and_validate_telem(engine, false);
+
+ let foundLogins = await Services.logins.searchLoginsAsync({
+ origin: "http://www.mozilla.com",
+ });
+ foundLogins[0].password = "cheetah";
+ Services.logins.modifyLogin(login, foundLogins[0]);
+
+ // Use a time in the past to ensure that the local record is newer.
+ collection.updateRecord(
+ guid,
+ cleartext => {
+ cleartext.deleted = true;
+ },
+ Date.now() / 1000 - 1000
+ );
+
+ _(
+ "Perform sync when remote login has been deleted and local login has been changed"
+ );
+ await sync_engine_and_validate_telem(engine, false);
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "http://www.mozilla.com",
+ });
+ equal(logins.length, 1);
+ equal(logins[0].password, "cheetah");
+ equal(logins[0].syncCounter, 0);
+ equal(logins[0].everSynced, true);
+ ok(await engine._store.getAllIDs());
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_sync_incoming_no_formactionorigin() {
+ _("Test syncing incoming a record where there is no formActionOrigin");
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.user("foo").collection("passwords");
+
+ const checkFields = [
+ "formSubmitURL",
+ "hostname",
+ "httpRealm",
+ "username",
+ "password",
+ "usernameField",
+ "passwordField",
+ "timeCreated",
+ ];
+
+ let guid1 = Utils.makeGUID();
+ let details = {
+ formSubmitURL: "",
+ hostname: "https://www.example.com",
+ httpRealm: null,
+ username: "rabbit",
+ password: "squirrel",
+ usernameField: "username-field",
+ passwordField: "password-field",
+ timeCreated: Date.now(),
+ timePasswordChanged: Date.now(),
+ };
+
+ try {
+ // This test creates a remote server record and then verifies that the login
+ // has been added locally after the sync occurs.
+ _("Create remote login");
+ collection.insertRecord(Object.assign({}, details, { id: guid1 }));
+
+ _("Perform sync when remote login has been added");
+ await sync_engine_and_validate_telem(engine, false);
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "https://www.example.com",
+ formActionOrigin: "",
+ });
+ equal(logins.length, 1);
+
+ equal(logins[0].QueryInterface(Ci.nsILoginMetaInfo).guid, guid1);
+ checkFields.forEach(field => {
+ equal(logins[0][field], details[field]);
+ });
+ equal(logins[0].timePasswordChanged, details.timePasswordChanged);
+ equal(logins[0].syncCounter, 0);
+ equal(logins[0].everSynced, true);
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_password_dupe() {
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ let collection = server.user("foo").collection("passwords");
+
+ let guid1 = Utils.makeGUID();
+ let rec1 = new LoginRec("passwords", guid1);
+ let guid2 = Utils.makeGUID();
+ let cleartext = {
+ formSubmitURL: "https://www.example.com",
+ hostname: "https://www.example.com",
+ httpRealm: null,
+ username: "foo",
+ password: "bar",
+ usernameField: "username-field",
+ passwordField: "password-field",
+ timeCreated: Math.round(Date.now()),
+ timePasswordChanged: Math.round(Date.now()),
+ };
+ rec1.cleartext = cleartext;
+
+ _("Create remote record with same details and guid1");
+ collection.insert(guid1, encryptPayload(rec1.cleartext));
+
+ _("Create remote record with guid2");
+ collection.insert(guid2, encryptPayload(cleartext));
+
+ _("Create local record with same details and guid1");
+ await engine._store.create(rec1);
+
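+  // Expected: the engine dedupes the local guid1 login against the matching
+  // incoming guid2 record, so only guid2 survives locally and guid1 ends up
+  // removed from the server (collection.payload(guid1) becomes null).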
+ try {
+ _("Perform sync");
+ await sync_engine_and_validate_telem(engine, true);
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "https://www.example.com",
+ });
+
+ equal(logins.length, 1);
+ equal(logins[0].QueryInterface(Ci.nsILoginMetaInfo).guid, guid2);
+ equal(null, collection.payload(guid1));
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_updated_null_password_sync() {
+ _("Ensure updated null login username is converted to a string");
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ let collection = server.user("foo").collection("passwords");
+
+ let guid1 = Utils.makeGUID();
+ let guid2 = Utils.makeGUID();
+ let remoteDetails = {
+ formSubmitURL: "https://www.nullupdateexample.com",
+ hostname: "https://www.nullupdateexample.com",
+ httpRealm: null,
+ username: null,
+ password: "bar",
+ usernameField: "username-field",
+ passwordField: "password-field",
+ timeCreated: Date.now(),
+ timePasswordChanged: Date.now(),
+ };
+ let localDetails = {
+ formSubmitURL: "https://www.nullupdateexample.com",
+ hostname: "https://www.nullupdateexample.com",
+ httpRealm: null,
+ username: "foo",
+ password: "foobar",
+ usernameField: "username-field",
+ passwordField: "password-field",
+ timeCreated: Date.now(),
+ timePasswordChanged: Date.now(),
+ };
+
+ _("Create remote record with same details and guid1");
+ collection.insertRecord(Object.assign({}, remoteDetails, { id: guid1 }));
+
+ try {
+ _("Create local updated login with null password");
+ await engine._store.update(Object.assign({}, localDetails, { id: guid2 }));
+
+ _("Perform sync");
+ await sync_engine_and_validate_telem(engine, false);
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "https://www.nullupdateexample.com",
+ });
+
+ equal(logins.length, 1);
+ equal(logins[0].QueryInterface(Ci.nsILoginMetaInfo).guid, guid1);
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_updated_undefined_password_sync() {
+ _("Ensure updated undefined login username is converted to a string");
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ let collection = server.user("foo").collection("passwords");
+
+ let guid1 = Utils.makeGUID();
+ let guid2 = Utils.makeGUID();
+ let remoteDetails = {
+ formSubmitURL: "https://www.undefinedupdateexample.com",
+ hostname: "https://www.undefinedupdateexample.com",
+ httpRealm: null,
+ username: undefined,
+ password: "bar",
+ usernameField: "username-field",
+ passwordField: "password-field",
+ timeCreated: Date.now(),
+ timePasswordChanged: Date.now(),
+ };
+ let localDetails = {
+ formSubmitURL: "https://www.undefinedupdateexample.com",
+ hostname: "https://www.undefinedupdateexample.com",
+ httpRealm: null,
+ username: "foo",
+ password: "foobar",
+ usernameField: "username-field",
+ passwordField: "password-field",
+ timeCreated: Date.now(),
+ timePasswordChanged: Date.now(),
+ };
+
+ _("Create remote record with same details and guid1");
+ collection.insertRecord(Object.assign({}, remoteDetails, { id: guid1 }));
+
+ try {
+ _("Create local updated login with undefined password");
+ await engine._store.update(Object.assign({}, localDetails, { id: guid2 }));
+
+ _("Perform sync");
+ await sync_engine_and_validate_telem(engine, false);
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "https://www.undefinedupdateexample.com",
+ });
+
+ equal(logins.length, 1);
+ equal(logins[0].QueryInterface(Ci.nsILoginMetaInfo).guid, guid1);
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_new_null_password_sync() {
+ _("Ensure new null login username is converted to a string");
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let guid1 = Utils.makeGUID();
+ let rec1 = new LoginRec("passwords", guid1);
+ rec1.cleartext = {
+ formSubmitURL: "https://www.example.com",
+ hostname: "https://www.example.com",
+ httpRealm: null,
+ username: null,
+ password: "bar",
+ usernameField: "username-field",
+ passwordField: "password-field",
+ timeCreated: Date.now(),
+ timePasswordChanged: Date.now(),
+ };
+
+ try {
+ _("Create local login with null password");
+ await engine._store.create(rec1);
+
+ _("Perform sync");
+ await sync_engine_and_validate_telem(engine, false);
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "https://www.example.com",
+ });
+
+ equal(logins.length, 1);
+ notEqual(logins[0].QueryInterface(Ci.nsILoginMetaInfo).username, null);
+ notEqual(logins[0].QueryInterface(Ci.nsILoginMetaInfo).username, undefined);
+ equal(logins[0].QueryInterface(Ci.nsILoginMetaInfo).username, "");
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_new_undefined_password_sync() {
+ _("Ensure new undefined login username is converted to a string");
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let guid1 = Utils.makeGUID();
+ let rec1 = new LoginRec("passwords", guid1);
+ rec1.cleartext = {
+ formSubmitURL: "https://www.example.com",
+ hostname: "https://www.example.com",
+ httpRealm: null,
+ username: undefined,
+ password: "bar",
+ usernameField: "username-field",
+ passwordField: "password-field",
+ timeCreated: Date.now(),
+ timePasswordChanged: Date.now(),
+ };
+
+ try {
+ _("Create local login with undefined password");
+ await engine._store.create(rec1);
+
+ _("Perform sync");
+ await sync_engine_and_validate_telem(engine, false);
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "https://www.example.com",
+ });
+
+ equal(logins.length, 1);
+ notEqual(logins[0].QueryInterface(Ci.nsILoginMetaInfo).username, null);
+ notEqual(logins[0].QueryInterface(Ci.nsILoginMetaInfo).username, undefined);
+ equal(logins[0].QueryInterface(Ci.nsILoginMetaInfo).username, "");
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_sync_password_validation() {
+ // This test isn't in test_password_validator to avoid duplicating cleanup.
+ _("Ensure that if a password validation happens, it ends up in the ping");
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ Svc.PrefBranch.setIntPref("engine.passwords.validation.interval", 0);
+ Svc.PrefBranch.setIntPref(
+ "engine.passwords.validation.percentageChance",
+ 100
+ );
+ Svc.PrefBranch.setIntPref("engine.passwords.validation.maxRecords", -1);
+ Svc.PrefBranch.setBoolPref("engine.passwords.validation.enabled", true);
+
+ try {
+ let ping = await wait_for_ping(() => Service.sync());
+
+ let engineInfo = ping.engines.find(e => e.name == "passwords");
+ ok(engineInfo, "Engine should be in ping");
+
+ let validation = engineInfo.validation;
+ ok(validation, "Engine should have validation info");
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_roundtrip_unknown_fields() {
+ _(
+ "Testing that unknown fields from other clients get roundtripped back to server"
+ );
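+
+  // i.e. fields this client doesn't understand arrive in the remote cleartext,
+  // are stashed locally as a JSON string (login.unknownFields), and are
+  // unfurled back onto the top-level record on the next upload.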
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ let collection = server.user("foo").collection("passwords");
+
+ enableValidationPrefs();
+
+ _("Add login with older password change time to replace during first sync");
+ let oldLogin;
+ {
+ let login = new LoginInfo(
+ "https://mozilla.com",
+ "",
+ null,
+ "us3r",
+ "0ldpa55",
+ "",
+ ""
+ );
+ await Services.logins.addLoginAsync(login);
+
+ let props = new PropertyBag();
+ let localPasswordChangeTime = Math.round(
+ Date.now() - 1 * 60 * 60 * 24 * 1000
+ );
+ props.setProperty("timePasswordChanged", localPasswordChangeTime);
+ Services.logins.modifyLogin(login, props);
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "https://mozilla.com",
+ });
+ equal(logins.length, 1, "Should find old login in login manager");
+ oldLogin = logins[0].QueryInterface(Ci.nsILoginMetaInfo);
+ equal(oldLogin.timePasswordChanged, localPasswordChangeTime);
+
+ let rec = new LoginRec("passwords", oldLogin.guid);
+ rec.hostname = oldLogin.origin;
+ rec.formSubmitURL = oldLogin.formActionOrigin;
+ rec.httpRealm = oldLogin.httpRealm;
+ rec.username = oldLogin.username;
+ // Change the password and bump the password change time to ensure we prefer
+ // the remote one during reconciliation.
+ rec.password = "n3wpa55";
+ rec.usernameField = oldLogin.usernameField;
+    rec.passwordField = oldLogin.passwordField;
+ rec.timeCreated = oldLogin.timeCreated;
+ rec.timePasswordChanged = Math.round(Date.now());
+
+ // pretend other clients have some snazzy new fields
+ // we don't quite understand yet
+ rec.cleartext.someStrField = "I am a str";
+ rec.cleartext.someObjField = { newField: "I am a new field" };
+ collection.insert(oldLogin.guid, encryptPayload(rec.cleartext));
+ }
+
+ await engine._tracker.stop();
+
+ try {
+ await sync_engine_and_validate_telem(engine, false);
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: "https://mozilla.com",
+ });
+ equal(
+ logins[0].password,
+ "n3wpa55",
+ "Should update local password for older login"
+ );
+ let expectedUnknowns = JSON.stringify({
+ someStrField: "I am a str",
+ someObjField: { newField: "I am a new field" },
+ });
+ // Check that the local record has all unknown fields properly
+ // stringified
+ equal(logins[0].unknownFields, expectedUnknowns);
+
+ // Check that the server has the unknown fields unfurled and on the
+ // top-level record
+ let serverRec = collection.cleartext(oldLogin.guid);
+ equal(serverRec.someStrField, "I am a str");
+ equal(serverRec.someObjField.newField, "I am a new field");
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_new_passwords_from_csv() {
+ _("Test syncing records imported from a csv file");
+
+ let engine = Service.engineManager.get("passwords");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let collection = server.user("foo").collection("passwords");
+
+ engine._tracker.start();
+
+ let data = [
+ {
+ hostname: "https://example.com",
+ url: "https://example.com/path",
+ username: "exampleuser",
+ password: "examplepassword",
+ },
+ {
+ hostname: "https://mozilla.org",
+ url: "https://mozilla.org",
+ username: "mozillauser",
+ password: "mozillapassword",
+ },
+ {
+ hostname: "https://www.example.org",
+ url: "https://www.example.org/example1/example2",
+ username: "person",
+ password: "mypassword",
+ },
+ ];
+
+ let csvData = ["url,username,login_password"];
+ for (let row of data) {
+ csvData.push(row.url + "," + row.username + "," + row.password);
+ }
+
+ let csvFile = FileTestUtils.getTempFile(`firefox_logins.csv`);
+ await IOUtils.writeUTF8(csvFile.path, csvData.join("\r\n"));
+
+ await LoginCSVImport.importFromCSV(csvFile.path);
+
+ equal(
+ engine._tracker.score,
+ SCORE_INCREMENT_XLARGE,
+ "Should only get one update notification for import"
+ );
+
+ _("Ensure that the csv import is correct");
+ for (let item of data) {
+ let foundLogins = await Services.logins.searchLoginsAsync({
+ origin: item.hostname,
+ });
+ equal(foundLogins.length, 1);
+ equal(foundLogins[0].syncCounter, 1);
+ equal(foundLogins[0].everSynced, false);
+ equal(foundLogins[0].username, item.username);
+ equal(foundLogins[0].password, item.password);
+ }
+
+ _("Perform sync after modifying the password");
+ await sync_engine_and_validate_telem(engine, false);
+
+ _("Verify that the sync counter and status are updated");
+ for (let item of data) {
+ let foundLogins = await Services.logins.searchLoginsAsync({
+ origin: item.hostname,
+ });
+ equal(foundLogins.length, 1);
+ equal(foundLogins[0].syncCounter, 0);
+ equal(foundLogins[0].everSynced, true);
+ equal(foundLogins[0].username, item.username);
+ equal(foundLogins[0].password, item.password);
+ item.guid = foundLogins[0].guid;
+ }
+
+  equal(Object.keys(await engine.getChangedIDs()).length, 0);
+ equal(collection.count(), 3);
+
+ for (let item of data) {
+ // The remote login should have the imported username and password.
+ let newRec = collection.cleartext(item.guid);
+ equal(newRec.username, item.username);
+ equal(newRec.password, item.password);
+ }
+});
diff --git a/services/sync/tests/unit/test_password_store.js b/services/sync/tests/unit/test_password_store.js
new file mode 100644
index 0000000000..ed393d6241
--- /dev/null
+++ b/services/sync/tests/unit/test_password_store.js
@@ -0,0 +1,398 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { LoginRec } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/passwords.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { SyncedRecordsTelemetry } = ChromeUtils.importESModule(
+ "resource://services-sync/telemetry.sys.mjs"
+);
+
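+/**
+ * Looks up `record` in the login store and asserts that `expectedCount`
+ * logins match. When timeCreated/timePasswordChanged are provided, the
+ * stored timestamps are compared against the expected values (>= when
+ * `recordIsUpdated` is set). Returns the stored timePasswordChanged,
+ * if available.
+ */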
+async function checkRecord(
+ name,
+ record,
+ expectedCount,
+ timeCreated,
+ expectedTimeCreated,
+ timePasswordChanged,
+ expectedTimePasswordChanged,
+ recordIsUpdated
+) {
+ let engine = Service.engineManager.get("passwords");
+ let store = engine._store;
+
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: record.hostname,
+ formActionOrigin: record.formSubmitURL,
+ });
+
+ _("Record" + name + ":" + JSON.stringify(logins));
+ _("Count" + name + ":" + logins.length);
+
+ Assert.equal(logins.length, expectedCount);
+
+ if (expectedCount > 0) {
+ Assert.ok(!!(await store.getAllIDs())[record.id]);
+ let stored_record = logins[0].QueryInterface(Ci.nsILoginMetaInfo);
+
+ if (timeCreated !== undefined) {
+ Assert.equal(stored_record.timeCreated, expectedTimeCreated);
+ }
+
+ if (timePasswordChanged !== undefined) {
+ if (recordIsUpdated) {
+ Assert.ok(
+ stored_record.timePasswordChanged >= expectedTimePasswordChanged
+ );
+ } else {
+ Assert.equal(
+ stored_record.timePasswordChanged,
+ expectedTimePasswordChanged
+ );
+ }
+ return stored_record.timePasswordChanged;
+ }
+ } else {
+ Assert.ok(!(await store.getAllIDs())[record.id]);
+ }
+ return undefined;
+}
+
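+/**
+ * Builds an incoming login record for `hostname`, optionally applies it
+ * via applyIncomingBatch (when `insert` is true), and then verifies it
+ * with checkRecord using the expected counts and timestamps.
+ */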
+async function changePassword(
+ name,
+ hostname,
+ password,
+ expectedCount,
+ timeCreated,
+ expectedTimeCreated,
+ timePasswordChanged,
+ expectedTimePasswordChanged,
+ insert,
+ recordIsUpdated
+) {
+ const BOGUS_GUID = "zzzzzz" + hostname;
+ let record = new LoginRec("passwords", BOGUS_GUID);
+ record.cleartext = {
+ id: BOGUS_GUID,
+ hostname,
+ formSubmitURL: hostname,
+ username: "john",
+ password,
+ usernameField: "username",
+ passwordField: "password",
+ };
+
+ if (timeCreated !== undefined) {
+ record.timeCreated = timeCreated;
+ }
+
+ if (timePasswordChanged !== undefined) {
+ record.timePasswordChanged = timePasswordChanged;
+ }
+
+ let engine = Service.engineManager.get("passwords");
+ let store = engine._store;
+
+ if (insert) {
+ let countTelemetry = new SyncedRecordsTelemetry();
+ Assert.equal(
+ (await store.applyIncomingBatch([record], countTelemetry)).length,
+ 0
+ );
+ }
+
+ return checkRecord(
+ name,
+ record,
+ expectedCount,
+ timeCreated,
+ expectedTimeCreated,
+ timePasswordChanged,
+ expectedTimePasswordChanged,
+ recordIsUpdated
+ );
+}
+
+async function test_apply_records_with_times(
+ hostname,
+ timeCreated,
+ timePasswordChanged
+) {
+ // The following record is going to be inserted in the store and it needs
+ // to be found there. Then its timestamps are going to be compared to
+ // the expected values.
+ await changePassword(
+ " ",
+ hostname,
+ "password",
+ 1,
+ timeCreated,
+ timeCreated,
+ timePasswordChanged,
+ timePasswordChanged,
+ true
+ );
+}
+
+async function test_apply_multiple_records_with_times() {
+ // The following records are going to be inserted in the store and they need
+ // to be found there. Then their timestamps are going to be compared to
+ // the expected values.
+ await changePassword(
+ "A",
+ "http://foo.a.com",
+ "password",
+ 1,
+ undefined,
+ undefined,
+ undefined,
+ undefined,
+ true
+ );
+ await changePassword(
+ "B",
+ "http://foo.b.com",
+ "password",
+ 1,
+ 1000,
+ 1000,
+ undefined,
+ undefined,
+ true
+ );
+ await changePassword(
+ "C",
+ "http://foo.c.com",
+ "password",
+ 1,
+ undefined,
+ undefined,
+ 1000,
+ 1000,
+ true
+ );
+ await changePassword(
+ "D",
+ "http://foo.d.com",
+ "password",
+ 1,
+ 1000,
+ 1000,
+ 1000,
+ 1000,
+ true
+ );
+
+ // The following records are not going to be inserted in the store and they
+ // are not going to be found there.
+ await changePassword(
+ "NotInStoreA",
+ "http://foo.aaaa.com",
+ "password",
+ 0,
+ undefined,
+ undefined,
+ undefined,
+ undefined,
+ false
+ );
+ await changePassword(
+ "NotInStoreB",
+ "http://foo.bbbb.com",
+ "password",
+ 0,
+ 1000,
+ 1000,
+ undefined,
+ undefined,
+ false
+ );
+ await changePassword(
+ "NotInStoreC",
+ "http://foo.cccc.com",
+ "password",
+ 0,
+ undefined,
+ undefined,
+ 1000,
+ 1000,
+ false
+ );
+ await changePassword(
+ "NotInStoreD",
+ "http://foo.dddd.com",
+ "password",
+ 0,
+ 1000,
+ 1000,
+ 1000,
+ 1000,
+ false
+ );
+}
+
+async function test_apply_same_record_with_different_times() {
+ // The following record is going to be inserted multiple times in the store
+ // and it needs to be found there. Then its timestamps are going to be
+ // compared to the expected values.
+
+ /* eslint-disable no-unused-vars */
+ /* The eslint linter thinks that timePasswordChanged is unused, even though
+ it is passed as an argument to changePassword. */
+ var timePasswordChanged = 100;
+ timePasswordChanged = await changePassword(
+ "A",
+ "http://a.tn",
+ "password",
+ 1,
+ 100,
+ 100,
+ 100,
+ timePasswordChanged,
+ true
+ );
+ timePasswordChanged = await changePassword(
+ "A",
+ "http://a.tn",
+ "password",
+ 1,
+ 100,
+ 100,
+ 800,
+ timePasswordChanged,
+ true,
+ true
+ );
+ timePasswordChanged = await changePassword(
+ "A",
+ "http://a.tn",
+ "password",
+ 1,
+ 500,
+ 100,
+ 800,
+ timePasswordChanged,
+ true,
+ true
+ );
+ timePasswordChanged = await changePassword(
+ "A",
+ "http://a.tn",
+ "password2",
+ 1,
+ 500,
+ 100,
+ 1536213005222,
+ timePasswordChanged,
+ true,
+ true
+ );
+ timePasswordChanged = await changePassword(
+ "A",
+ "http://a.tn",
+ "password2",
+ 1,
+ 500,
+ 100,
+ 800,
+ timePasswordChanged,
+ true,
+ true
+ );
+ /* eslint-enable no-unused-vars */
+}
+
+async function test_LoginRec_toString(store, recordData) {
+ let rec = await store.createRecord(recordData.id);
+ ok(rec);
+ ok(!rec.toString().includes(rec.password));
+}
+
+add_task(async function run_test() {
+ const BOGUS_GUID_A = "zzzzzzzzzzzz";
+ const BOGUS_GUID_B = "yyyyyyyyyyyy";
+ let recordA = new LoginRec("passwords", BOGUS_GUID_A);
+ let recordB = new LoginRec("passwords", BOGUS_GUID_B);
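+  // recordA is intentionally invalid: it sets both formSubmitURL and
+  // httpRealm, but a login may carry only one of the two, so the store
+  // should reject it.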
+ recordA.cleartext = {
+ id: BOGUS_GUID_A,
+ hostname: "http://foo.bar.com",
+ formSubmitURL: "http://foo.bar.com",
+ httpRealm: "secure",
+ username: "john",
+ password: "smith",
+ usernameField: "username",
+ passwordField: "password",
+ };
+ recordB.cleartext = {
+ id: BOGUS_GUID_B,
+ hostname: "http://foo.baz.com",
+ formSubmitURL: "http://foo.baz.com",
+ username: "john",
+ password: "smith",
+ usernameField: "username",
+ passwordField: "password",
+ unknownStr: "an unknown string from another field",
+ };
+
+ let engine = Service.engineManager.get("passwords");
+ let store = engine._store;
+
+ try {
+ let countTelemetry = new SyncedRecordsTelemetry();
+ Assert.equal(
+ (await store.applyIncomingBatch([recordA, recordB], countTelemetry))
+ .length,
+ 0
+ );
+
+ // Only the good record makes it to Services.logins.
+ let badLogins = await Services.logins.searchLoginsAsync({
+ origin: recordA.hostname,
+ formActionOrigin: recordA.formSubmitURL,
+ httpRealm: recordA.httpRealm,
+ });
+ let goodLogins = await Services.logins.searchLoginsAsync({
+ origin: recordB.hostname,
+ formActionOrigin: recordB.formSubmitURL,
+ });
+
+ _("Bad: " + JSON.stringify(badLogins));
+ _("Good: " + JSON.stringify(goodLogins));
+ _("Count: " + badLogins.length + ", " + goodLogins.length);
+
+ Assert.equal(goodLogins.length, 1);
+ Assert.equal(badLogins.length, 0);
+
+ // applyIncoming should've put any unknown fields from the server
+ // into a catch-all unknownFields field
+ Assert.equal(
+ goodLogins[0].unknownFields,
+ JSON.stringify({
+ unknownStr: "an unknown string from another field",
+ })
+ );
+
+ Assert.ok(!!(await store.getAllIDs())[BOGUS_GUID_B]);
+ Assert.ok(!(await store.getAllIDs())[BOGUS_GUID_A]);
+
+ await test_LoginRec_toString(store, recordB);
+
+ await test_apply_records_with_times(
+ "http://afoo.baz.com",
+ undefined,
+ undefined
+ );
+ await test_apply_records_with_times("http://bfoo.baz.com", 1000, undefined);
+ await test_apply_records_with_times("http://cfoo.baz.com", undefined, 2000);
+ await test_apply_records_with_times("http://dfoo.baz.com", 1000, 2000);
+
+ await test_apply_multiple_records_with_times();
+
+ await test_apply_same_record_with_different_times();
+ } finally {
+ await store.wipe();
+ }
+});
diff --git a/services/sync/tests/unit/test_password_tracker.js b/services/sync/tests/unit/test_password_tracker.js
new file mode 100644
index 0000000000..77b46d1d2c
--- /dev/null
+++ b/services/sync/tests/unit/test_password_tracker.js
@@ -0,0 +1,248 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { PasswordEngine, LoginRec } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/passwords.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+let engine;
+let store;
+let tracker;
+
+add_task(async function setup() {
+ await Service.engineManager.register(PasswordEngine);
+ engine = Service.engineManager.get("passwords");
+ store = engine._store;
+ tracker = engine._tracker;
+});
+
+add_task(async function test_tracking() {
+ let recordNum = 0;
+
+ _("Verify we've got an empty tracker to work with.");
+ let changes = await engine.getChangedIDs();
+ do_check_empty(changes);
+
+ let exceptionHappened = false;
+ try {
+ await tracker.getChangedIDs();
+ } catch (ex) {
+ exceptionHappened = true;
+ }
+ ok(exceptionHappened, "tracker does not keep track of changes");
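+  // For passwords, change tracking lives in the login store itself (see
+  // the per-login syncCounter assertions in these tests), so the tracker
+  // keeps no changed-ID bookkeeping of its own.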
+
+ async function createPassword() {
+ _("RECORD NUM: " + recordNum);
+ let record = new LoginRec("passwords", "GUID" + recordNum);
+ record.cleartext = {
+ id: "GUID" + recordNum,
+ hostname: "http://foo.bar.com",
+ formSubmitURL: "http://foo.bar.com",
+ username: "john" + recordNum,
+ password: "smith",
+ usernameField: "username",
+ passwordField: "password",
+ };
+ recordNum++;
+ let login = store._nsLoginInfoFromRecord(record);
+ await Services.logins.addLoginAsync(login);
+ await tracker.asyncObserver.promiseObserversComplete();
+ }
+
+ try {
+ tracker.start();
+ await createPassword();
+ changes = await engine.getChangedIDs();
+ do_check_attribute_count(changes, 1);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ Assert.equal(changes.GUID0.counter, 1);
+    Assert.equal(typeof changes.GUID0.modified, "number");
+
+ _("Starting twice won't do any harm.");
+ tracker.start();
+ await createPassword();
+ changes = await engine.getChangedIDs();
+ do_check_attribute_count(changes, 2);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 2);
+ Assert.equal(changes.GUID0.counter, 1);
+ Assert.equal(changes.GUID1.counter, 1);
+
+    // Even though the tracker is stopped, the login store still records
+    // changes, so 3 changes should still be returned; the score, however,
+    // is not updated.
+ _("Let's stop tracking again.");
+ tracker.resetScore();
+ await tracker.stop();
+ await createPassword();
+ changes = await engine.getChangedIDs();
+ do_check_attribute_count(changes, 3);
+ Assert.equal(tracker.score, 0);
+ Assert.equal(changes.GUID0.counter, 1);
+ Assert.equal(changes.GUID1.counter, 1);
+ Assert.equal(changes.GUID2.counter, 1);
+
+ _("Stopping twice won't do any harm.");
+ await tracker.stop();
+ await createPassword();
+ changes = await engine.getChangedIDs();
+ do_check_attribute_count(changes, 4);
+ Assert.equal(tracker.score, 0);
+ } finally {
+ _("Clean up.");
+ await store.wipe();
+ tracker.resetScore();
+ await tracker.stop();
+ }
+});
+
+add_task(async function test_onWipe() {
+ _("Verify we've got an empty tracker to work with.");
+ const changes = await engine.getChangedIDs();
+ do_check_empty(changes);
+ Assert.equal(tracker.score, 0);
+
+ try {
+ _("A store wipe should increment the score");
+ tracker.start();
+ await store.wipe();
+ await tracker.asyncObserver.promiseObserversComplete();
+
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ } finally {
+ tracker.resetScore();
+ await tracker.stop();
+ }
+});
+
+add_task(async function test_removeAllLogins() {
+ let recordNum = 0;
+ _("Verify that all tracked logins are removed.");
+
+  // Run this test twice: first without a sync between adding and removing
+  // the items, then with one. In the former case, the logins are simply
+  // deleted because they have never been synced, so they won't be detected
+  // as changes. In the latter case, the logins have been synced, so they
+  // will be marked for deletion.
+ for (let syncBeforeRemove of [false, true]) {
+ async function createPassword() {
+ _("RECORD NUM: " + recordNum);
+ let record = new LoginRec("passwords", "GUID" + recordNum);
+ record.cleartext = {
+ id: "GUID" + recordNum,
+ hostname: "http://foo.bar.com",
+ formSubmitURL: "http://foo.bar.com",
+ username: "john" + recordNum,
+ password: "smith",
+ usernameField: "username",
+ passwordField: "password",
+ };
+ recordNum++;
+ let login = store._nsLoginInfoFromRecord(record);
+ await Services.logins.addLoginAsync(login);
+
+ await tracker.asyncObserver.promiseObserversComplete();
+ }
+ try {
+ _("Tell tracker to start tracking changes");
+ tracker.start();
+ await createPassword();
+ await createPassword();
+ let changes = await engine.getChangedIDs();
+ do_check_attribute_count(changes, 2);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 2);
+
+ if (syncBeforeRemove) {
+ let logins = await Services.logins.getAllLogins();
+ for (let login of logins) {
+ engine.markSynced(login.guid);
+ }
+ }
+
+ _("Tell sync to remove all logins");
+ Services.logins.removeAllUserFacingLogins();
+ await tracker.asyncObserver.promiseObserversComplete();
+ changes = await engine.getChangedIDs();
+ do_check_attribute_count(changes, syncBeforeRemove ? 2 : 0);
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 5);
+
+ let deletedGuids = await engine._store.getAllIDs();
+ if (syncBeforeRemove) {
+ for (let guid in deletedGuids) {
+ let deletedLogin = await engine._store._getLoginFromGUID(guid);
+
+ Assert.equal(deletedLogin.hostname, null, "deleted login hostname");
+ Assert.equal(
+ deletedLogin.formActionOrigin,
+ null,
+ "deleted login formActionOrigin"
+ );
+ Assert.equal(
+ deletedLogin.formSubmitURL,
+ null,
+ "deleted login formSubmitURL"
+ );
+ Assert.equal(deletedLogin.httpRealm, null, "deleted login httpRealm");
+ Assert.equal(deletedLogin.username, null, "deleted login username");
+ Assert.equal(deletedLogin.password, null, "deleted login password");
+ Assert.equal(
+ deletedLogin.usernameField,
+ "",
+ "deleted login usernameField"
+ );
+ Assert.equal(
+ deletedLogin.passwordField,
+ "",
+ "deleted login passwordField"
+ );
+ Assert.equal(
+ deletedLogin.unknownFields,
+ null,
+ "deleted login unknownFields"
+ );
+ Assert.equal(
+ deletedLogin.timeCreated,
+ 0,
+ "deleted login timeCreated"
+ );
+ Assert.equal(
+ deletedLogin.timeLastUsed,
+ 0,
+ "deleted login timeLastUsed"
+ );
+ Assert.equal(deletedLogin.timesUsed, 0, "deleted login timesUsed");
+
+ // These fields are not reset when the login is removed.
+ Assert.ok(deletedLogin.guid.startsWith("GUID"), "deleted login guid");
+ Assert.equal(
+ deletedLogin.everSynced,
+ true,
+ "deleted login everSynced"
+ );
+ Assert.equal(
+ deletedLogin.syncCounter,
+ 2,
+ "deleted login syncCounter"
+ );
+ Assert.ok(
+ deletedLogin.timePasswordChanged > 0,
+ "deleted login timePasswordChanged"
+ );
+ }
+ } else {
+ Assert.equal(
+ Object.keys(deletedGuids).length,
+ 0,
+ "no logins remain after removeAllUserFacingLogins"
+ );
+ }
+ } finally {
+ _("Clean up.");
+ await store.wipe();
+ tracker.resetScore();
+ await tracker.stop();
+ }
+ }
+});
diff --git a/services/sync/tests/unit/test_password_validator.js b/services/sync/tests/unit/test_password_validator.js
new file mode 100644
index 0000000000..445b119e1d
--- /dev/null
+++ b/services/sync/tests/unit/test_password_validator.js
@@ -0,0 +1,176 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { PasswordValidator } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/passwords.sys.mjs"
+);
+
+function getDummyServerAndClient() {
+ return {
+ server: [
+ {
+ id: "11111",
+ guid: "11111",
+ hostname: "https://www.11111.com",
+ formSubmitURL: "https://www.11111.com",
+ password: "qwerty123",
+ passwordField: "pass",
+ username: "foobar",
+ usernameField: "user",
+ httpRealm: null,
+ },
+ {
+ id: "22222",
+ guid: "22222",
+ hostname: "https://www.22222.org",
+ formSubmitURL: "https://www.22222.org",
+ password: "hunter2",
+ passwordField: "passwd",
+ username: "baz12345",
+ usernameField: "user",
+ httpRealm: null,
+ },
+ {
+ id: "33333",
+ guid: "33333",
+ hostname: "https://www.33333.com",
+ formSubmitURL: "https://www.33333.com",
+ password: "p4ssw0rd",
+ passwordField: "passwad",
+ username: "quux",
+ usernameField: "user",
+ httpRealm: null,
+ },
+ ],
+ client: [
+ {
+ id: "11111",
+ guid: "11111",
+ hostname: "https://www.11111.com",
+ formSubmitURL: "https://www.11111.com",
+ password: "qwerty123",
+ passwordField: "pass",
+ username: "foobar",
+ usernameField: "user",
+ httpRealm: null,
+ },
+ {
+ id: "22222",
+ guid: "22222",
+ hostname: "https://www.22222.org",
+ formSubmitURL: "https://www.22222.org",
+ password: "hunter2",
+ passwordField: "passwd",
+ username: "baz12345",
+ usernameField: "user",
+ httpRealm: null,
+ },
+ {
+ id: "33333",
+ guid: "33333",
+ hostname: "https://www.33333.com",
+ formSubmitURL: "https://www.33333.com",
+ password: "p4ssw0rd",
+ passwordField: "passwad",
+ username: "quux",
+ usernameField: "user",
+ httpRealm: null,
+ },
+ ],
+ };
+}
+
+add_task(async function test_valid() {
+ let { server, client } = getDummyServerAndClient();
+ let validator = new PasswordValidator();
+ let { problemData, clientRecords, records, deletedRecords } =
+ await validator.compareClientWithServer(client, server);
+ equal(clientRecords.length, 3);
+ equal(records.length, 3);
+ equal(deletedRecords.length, 0);
+ deepEqual(problemData, validator.emptyProblemData());
+});
+
+add_task(async function test_missing() {
+ let validator = new PasswordValidator();
+ {
+ let { server, client } = getDummyServerAndClient();
+
+ client.pop();
+
+ let { problemData, clientRecords, records, deletedRecords } =
+ await validator.compareClientWithServer(client, server);
+
+ equal(clientRecords.length, 2);
+ equal(records.length, 3);
+ equal(deletedRecords.length, 0);
+
+ let expected = validator.emptyProblemData();
+ expected.clientMissing.push("33333");
+ deepEqual(problemData, expected);
+ }
+ {
+ let { server, client } = getDummyServerAndClient();
+
+ server.pop();
+
+ let { problemData, clientRecords, records, deletedRecords } =
+ await validator.compareClientWithServer(client, server);
+
+ equal(clientRecords.length, 3);
+ equal(records.length, 2);
+ equal(deletedRecords.length, 0);
+
+ let expected = validator.emptyProblemData();
+ expected.serverMissing.push("33333");
+ deepEqual(problemData, expected);
+ }
+});
+
+add_task(async function test_deleted() {
+ let { server, client } = getDummyServerAndClient();
+ let deletionRecord = { id: "444444", guid: "444444", deleted: true };
+
+ server.push(deletionRecord);
+ let validator = new PasswordValidator();
+
+ let { problemData, clientRecords, records, deletedRecords } =
+ await validator.compareClientWithServer(client, server);
+
+ equal(clientRecords.length, 3);
+ equal(records.length, 4);
+ deepEqual(deletedRecords, [deletionRecord]);
+
+ let expected = validator.emptyProblemData();
+ deepEqual(problemData, expected);
+});
+
+add_task(async function test_duplicates() {
+ let validator = new PasswordValidator();
+ {
+ let { server, client } = getDummyServerAndClient();
+ client.push(Cu.cloneInto(client[0], {}));
+
+ let { problemData } = await validator.compareClientWithServer(
+ client,
+ server
+ );
+
+ let expected = validator.emptyProblemData();
+ expected.clientDuplicates.push("11111");
+ deepEqual(problemData, expected);
+ }
+ {
+ let { server, client } = getDummyServerAndClient();
+ server.push(Cu.cloneInto(server[server.length - 1], {}));
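+    // Server-side duplicates are reported under `duplicates`, while
+    // client-side duplicates land in `clientDuplicates` (see above).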
+
+ let { problemData } = await validator.compareClientWithServer(
+ client,
+ server
+ );
+
+ let expected = validator.emptyProblemData();
+ expected.duplicates.push("33333");
+ deepEqual(problemData, expected);
+ }
+});
diff --git a/services/sync/tests/unit/test_postqueue.js b/services/sync/tests/unit/test_postqueue.js
new file mode 100644
index 0000000000..2e687bce11
--- /dev/null
+++ b/services/sync/tests/unit/test_postqueue.js
@@ -0,0 +1,985 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+let { PostQueue } = ChromeUtils.importESModule(
+ "resource://services-sync/record.sys.mjs"
+);
+
+function makeRecord(nbytes) {
+ return {
+ toJSON: () => ({ payload: "x".repeat(nbytes) }),
+ };
+}
+
+// Note: This is 14 bytes. Tests make assumptions about this (even if it's just
+// in setting config.max_request_bytes to a specific value).
+makeRecord.nonPayloadOverhead = JSON.stringify(makeRecord(0).toJSON()).length;
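+// (For reference, JSON.stringify({ payload: "" }) is '{"payload":""}',
+// which is 14 characters.)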
+
+// Gives how many encoded bytes a request with the given payload
+// sizes will be (assuming the records were created by makeRecord)
+// requestBytesFor([6]) => 22, requestBytesFor([6, 6]) => 43
+function requestBytesFor(recordPayloadByteCounts) {
+ let requestBytes = 1;
+ for (let size of recordPayloadByteCounts) {
+ requestBytes += size + 1 + makeRecord.nonPayloadOverhead;
+ }
+ return requestBytes;
+}
+
+function makePostQueue(config, lastModTime, responseGenerator) {
+ let stats = {
+ posts: [],
+ batches: [],
+ };
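+  // Fake poster with the signature PostQueue invokes:
+  // poster(data, headers, batch, commit) -> Promise<response>.
+  // It records per-post and per-batch stats and asserts that the
+  // configured limits were respected.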
+ let poster = (data, headers, batch, commit) => {
+ let payloadBytes = 0;
+ let numRecords = 0;
+ for (let record of JSON.parse(data)) {
+ if (config.max_record_payload_bytes) {
+ less(
+ record.payload.length,
+ config.max_record_payload_bytes,
+ "PostQueue should respect max_record_payload_bytes"
+ );
+ }
+ payloadBytes += record.payload.length;
+ ++numRecords;
+ }
+
+ let thisPost = {
+ nbytes: data.length,
+ batch,
+ commit,
+ payloadBytes,
+ numRecords,
+ };
+
+ if (headers.length) {
+ thisPost.headers = headers;
+ }
+
+ // check that we respected the provided limits for the post
+ if (config.max_post_records) {
+ lessOrEqual(
+ numRecords,
+ config.max_post_records,
+ "PostQueue should respect max_post_records"
+ );
+ }
+
+ if (config.max_post_bytes) {
+ less(
+ payloadBytes,
+ config.max_post_bytes,
+ "PostQueue should respect max_post_bytes"
+ );
+ }
+
+ if (config.max_request_bytes) {
+ less(
+ thisPost.nbytes,
+ config.max_request_bytes,
+ "PostQueue should respect max_request_bytes"
+ );
+ }
+
+ stats.posts.push(thisPost);
+
+ // Call this now so we can check if there's a batch id in it.
+    // Kind of kludgy, but allows us to have the correct batch id even
+ // before the next post is made.
+ let nextResponse = responseGenerator.next().value;
+
+ // Record info for the batch.
+
+ let curBatch = stats.batches[stats.batches.length - 1];
+ // If there's no batch, it committed, or we requested a new one,
+ // then we need to start a new one.
+ if (!curBatch || batch == "true" || curBatch.didCommit) {
+ curBatch = {
+ posts: 0,
+ payloadBytes: 0,
+ numRecords: 0,
+ didCommit: false,
+ batch,
+ serverBatch: false,
+ };
+ if (nextResponse.obj && nextResponse.obj.batch) {
+ curBatch.batch = nextResponse.obj.batch;
+ curBatch.serverBatch = true;
+ }
+ stats.batches.push(curBatch);
+ }
+
+ // If we provided a batch id, it must be the same as the current batch
+ if (batch && batch != "true") {
+ equal(curBatch.batch, batch);
+ }
+
+ curBatch.posts += 1;
+ curBatch.payloadBytes += payloadBytes;
+ curBatch.numRecords += numRecords;
+ curBatch.didCommit = commit;
+
+ // if this is an actual server batch (or it's a one-shot batch), check that
+ // we respected the provided total limits
+ if (commit && (batch == "true" || curBatch.serverBatch)) {
+ if (config.max_total_records) {
+ lessOrEqual(
+ curBatch.numRecords,
+ config.max_total_records,
+ "PostQueue should respect max_total_records"
+ );
+ }
+
+ if (config.max_total_bytes) {
+ less(
+ curBatch.payloadBytes,
+ config.max_total_bytes,
+ "PostQueue should respect max_total_bytes"
+ );
+ }
+ }
+
+ return Promise.resolve(nextResponse);
+ };
+
+ let done = () => {};
+ let pq = new PostQueue(poster, lastModTime, config, getTestLogger(), done);
+ return { pq, stats };
+}
+
+add_task(async function test_simple() {
+ let config = {
+ max_request_bytes: 1000,
+ max_record_payload_bytes: 1000,
+ };
+
+ const time = 11111111;
+
+ function* responseGenerator() {
+ yield {
+ success: true,
+ status: 200,
+ headers: {
+ "x-weave-timestamp": time + 100,
+ "x-last-modified": time + 100,
+ },
+ };
+ }
+
+ let { pq, stats } = makePostQueue(config, time, responseGenerator());
+ await pq.enqueue(makeRecord(10));
+ await pq.flush(true);
+
+ deepEqual(stats.posts, [
+ {
+ nbytes: requestBytesFor([10]),
+ payloadBytes: 10,
+ numRecords: 1,
+ commit: true, // we don't know if we have batch semantics, so committed.
+ headers: [["x-if-unmodified-since", time]],
+ batch: "true",
+ },
+ ]);
+ deepEqual(stats.batches, [
+ {
+ posts: 1,
+ payloadBytes: 10,
+ numRecords: 1,
+ didCommit: true,
+ batch: "true",
+ serverBatch: false,
+ },
+ ]);
+});
+
+// Test we do the right thing when we need to make multiple posts when there
+// are no batch semantics
+add_task(async function test_max_request_bytes_no_batch() {
+ let config = {
+ max_request_bytes: 50,
+ max_record_payload_bytes: 50,
+ };
+
+ const time = 11111111;
+ function* responseGenerator() {
+ yield {
+ success: true,
+ status: 200,
+ headers: {
+ "x-weave-timestamp": time + 100,
+ "x-last-modified": time + 100,
+ },
+ };
+ yield {
+ success: true,
+ status: 200,
+ headers: {
+ "x-weave-timestamp": time + 200,
+ "x-last-modified": time + 200,
+ },
+ };
+ }
+
+ let { pq, stats } = makePostQueue(config, time, responseGenerator());
+ let payloadSize = 20 - makeRecord.nonPayloadOverhead;
+ await pq.enqueue(makeRecord(payloadSize)); // total size now 22 bytes - "[" + record + "]"
+ await pq.enqueue(makeRecord(payloadSize)); // total size now 43 bytes - "[" + record + "," + record + "]"
+ await pq.enqueue(makeRecord(payloadSize)); // this will exceed our byte limit, so will be in the 2nd POST.
+ await pq.flush(true);
+ deepEqual(stats.posts, [
+ {
+ nbytes: 43, // 43 for the first part
+ payloadBytes: payloadSize * 2,
+ numRecords: 2,
+ commit: false,
+ headers: [["x-if-unmodified-since", time]],
+ batch: "true",
+ },
+ {
+ nbytes: 22,
+ payloadBytes: payloadSize,
+ numRecords: 1,
+ commit: false, // we know we aren't in a batch, so never commit.
+ headers: [["x-if-unmodified-since", time + 100]],
+ batch: null,
+ },
+ ]);
+ equal(stats.batches.filter(x => x.didCommit).length, 0);
+ equal(pq.lastModified, time + 200);
+});
+
+add_task(async function test_max_record_payload_bytes_no_batch() {
+ let config = {
+ max_request_bytes: 100,
+ max_record_payload_bytes: 50,
+ };
+
+ const time = 11111111;
+
+ function* responseGenerator() {
+ yield {
+ success: true,
+ status: 200,
+ headers: {
+ "x-weave-timestamp": time + 100,
+ "x-last-modified": time + 100,
+ },
+ };
+ }
+
+ let { pq, stats } = makePostQueue(config, time, responseGenerator());
+ // Should trigger when the record really is too large to fit
+ let { enqueued } = await pq.enqueue(makeRecord(51));
+ ok(!enqueued);
+  // Shouldn't trigger when only the full encoded record (payload plus
+  // overhead) reaches the limit, since the limit applies to payload bytes.
+ ok(
+ (await pq.enqueue(makeRecord(50 - makeRecord.nonPayloadOverhead))).enqueued
+ ); // total size now 52 bytes - "[" + record + "]"
+ ok(
+ (await pq.enqueue(makeRecord(46 - makeRecord.nonPayloadOverhead))).enqueued
+ ); // total size now 99 bytes - "[" + record0 + "," + record1 + "]"
+
+ await pq.flush(true);
+
+ deepEqual(stats.posts, [
+ {
+ nbytes: 99,
+ payloadBytes: 50 + 46 - makeRecord.nonPayloadOverhead * 2,
+ numRecords: 2,
+      commit: true, // we don't know if we have batch semantics, so committed.
+ batch: "true",
+ headers: [["x-if-unmodified-since", time]],
+ },
+ ]);
+
+ deepEqual(stats.batches, [
+ {
+ posts: 1,
+ payloadBytes: 50 + 46 - makeRecord.nonPayloadOverhead * 2,
+ numRecords: 2,
+ didCommit: true,
+ batch: "true",
+ serverBatch: false,
+ },
+ ]);
+
+ equal(pq.lastModified, time + 100);
+});
+
+// Batch tests.
+
+// Test making a single post when batch semantics are in place.
+
+add_task(async function test_single_batch() {
+ let config = {
+ max_post_bytes: 1000,
+ max_post_records: 100,
+ max_total_records: 200,
+ max_record_payload_bytes: 1000,
+ };
+ const time = 11111111;
+ function* responseGenerator() {
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 1234 },
+ headers: { "x-last-modified": time, "x-weave-timestamp": time + 100 },
+ };
+ }
+
+ let { pq, stats } = makePostQueue(config, time, responseGenerator());
+ ok((await pq.enqueue(makeRecord(10))).enqueued);
+ await pq.flush(true);
+
+ deepEqual(stats.posts, [
+ {
+ nbytes: requestBytesFor([10]),
+ numRecords: 1,
+ payloadBytes: 10,
+ commit: true, // we don't know if we have batch semantics, so committed.
+ batch: "true",
+ headers: [["x-if-unmodified-since", time]],
+ },
+ ]);
+
+ deepEqual(stats.batches, [
+ {
+ posts: 1,
+ payloadBytes: 10,
+ numRecords: 1,
+ didCommit: true,
+ batch: 1234,
+ serverBatch: true,
+ },
+ ]);
+});
+
+// Test we do the right thing when we need to make multiple posts due to
+// max_post_bytes when there are batch semantics in place.
+add_task(async function test_max_post_bytes_batch() {
+ let config = {
+ max_post_bytes: 50,
+ max_post_records: 4,
+ max_total_bytes: 5000,
+ max_total_records: 100,
+ max_record_payload_bytes: 50,
+ max_request_bytes: 4000,
+ };
+
+ const time = 11111111;
+ function* responseGenerator() {
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 1234 },
+ headers: { "x-last-modified": time, "x-weave-timestamp": time + 100 },
+ };
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 1234 },
+ headers: {
+ "x-last-modified": time + 200,
+ "x-weave-timestamp": time + 200,
+ },
+ };
+ }
+
+ let { pq, stats } = makePostQueue(config, time, responseGenerator());
+ ok((await pq.enqueue(makeRecord(20))).enqueued); // 20
+ ok((await pq.enqueue(makeRecord(20))).enqueued); // 40
+ // 60 would overflow, so post
+ ok((await pq.enqueue(makeRecord(20))).enqueued); // 20
+ await pq.flush(true);
+
+ deepEqual(stats.posts, [
+ {
+ nbytes: requestBytesFor([20, 20]),
+ payloadBytes: 40,
+ numRecords: 2,
+ commit: false,
+ batch: "true",
+ headers: [["x-if-unmodified-since", time]],
+ },
+ {
+ nbytes: requestBytesFor([20]),
+ payloadBytes: 20,
+ numRecords: 1,
+ commit: true,
+ batch: 1234,
+ headers: [["x-if-unmodified-since", time]],
+ },
+ ]);
+
+ deepEqual(stats.batches, [
+ {
+ posts: 2,
+ payloadBytes: 60,
+ numRecords: 3,
+ didCommit: true,
+ batch: 1234,
+ serverBatch: true,
+ },
+ ]);
+
+ equal(pq.lastModified, time + 200);
+});
+
+// Test we do the right thing when we need to make multiple posts due to
+// max_request_bytes when there are batch semantics in place.
+add_task(async function test_max_request_bytes_batch() {
+ let config = {
+ max_post_bytes: 60,
+ max_post_records: 40,
+ max_total_bytes: 5000,
+ max_total_records: 100,
+ max_record_payload_bytes: 500,
+ max_request_bytes: 100,
+ };
+
+ const time = 11111111;
+ function* responseGenerator() {
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 1234 },
+ headers: { "x-last-modified": time, "x-weave-timestamp": time + 100 },
+ };
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 1234 },
+ headers: {
+ "x-last-modified": time + 200,
+ "x-weave-timestamp": time + 200,
+ },
+ };
+ }
+
+ let { pq, stats } = makePostQueue(config, time, responseGenerator());
+ ok((await pq.enqueue(makeRecord(10))).enqueued); // post: 10, request: 26 (10 + 14 + 2)
+ ok((await pq.enqueue(makeRecord(10))).enqueued); // post: 20, request: 51 (10 + 14 + 1) * 2 + 1
+ ok((await pq.enqueue(makeRecord(10))).enqueued); // post: 30, request: 76 (10 + 14 + 1) * 3 + 1
+  // One more would be post: 40 (fine) but request: 101, so we should post.
+ ok((await pq.enqueue(makeRecord(10))).enqueued);
+ await pq.flush(true);
+
+ deepEqual(stats.posts, [
+ {
+ nbytes: requestBytesFor([10, 10, 10]),
+ payloadBytes: 30,
+ numRecords: 3,
+ commit: false,
+ batch: "true",
+ headers: [["x-if-unmodified-since", time]],
+ },
+ {
+ nbytes: requestBytesFor([10]),
+ payloadBytes: 10,
+ numRecords: 1,
+ commit: true,
+ batch: 1234,
+ headers: [["x-if-unmodified-since", time]],
+ },
+ ]);
+
+ deepEqual(stats.batches, [
+ {
+ posts: 2,
+ payloadBytes: 40,
+ numRecords: 4,
+ didCommit: true,
+ batch: 1234,
+ serverBatch: true,
+ },
+ ]);
+
+ equal(pq.lastModified, time + 200);
+});
+
+// Test we do the right thing when the batch bytes limit is exceeded.
+add_task(async function test_max_total_bytes_batch() {
+ let config = {
+ max_post_bytes: 50,
+ max_post_records: 20,
+ max_total_bytes: 70,
+ max_total_records: 100,
+ max_record_payload_bytes: 50,
+ max_request_bytes: 500,
+ };
+
+ const time0 = 11111111;
+ const time1 = 22222222;
+ function* responseGenerator() {
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 1234 },
+ headers: { "x-last-modified": time0, "x-weave-timestamp": time0 + 100 },
+ };
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 1234 },
+ headers: { "x-last-modified": time1, "x-weave-timestamp": time1 },
+ };
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 5678 },
+ headers: { "x-last-modified": time1, "x-weave-timestamp": time1 + 100 },
+ };
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 5678 },
+ headers: {
+ "x-last-modified": time1 + 200,
+ "x-weave-timestamp": time1 + 200,
+ },
+ };
+ }
+
+ let { pq, stats } = makePostQueue(config, time0, responseGenerator());
+
+ ok((await pq.enqueue(makeRecord(20))).enqueued); // payloads = post: 20, batch: 20
+ ok((await pq.enqueue(makeRecord(20))).enqueued); // payloads = post: 40, batch: 40
+
+ // this will exceed our POST byte limit, so will be in the 2nd POST - but still in the first batch.
+ ok((await pq.enqueue(makeRecord(20))).enqueued); // payloads = post: 20, batch: 60
+
+ // this will exceed our batch byte limit, so will be in a new batch.
+ ok((await pq.enqueue(makeRecord(20))).enqueued); // payloads = post: 20, batch: 20
+ ok((await pq.enqueue(makeRecord(20))).enqueued); // payloads = post: 40, batch: 40
+ // This will exceed POST byte limit, so will be in the 4th post, part of the 2nd batch.
+ ok((await pq.enqueue(makeRecord(20))).enqueued); // payloads = post: 20, batch: 60
+ await pq.flush(true);
+
+ deepEqual(stats.posts, [
+ {
+ nbytes: requestBytesFor([20, 20]),
+ payloadBytes: 40,
+ numRecords: 2,
+ commit: false,
+ batch: "true",
+ headers: [["x-if-unmodified-since", time0]],
+ },
+ {
+ nbytes: requestBytesFor([20]),
+ payloadBytes: 20,
+ numRecords: 1,
+ commit: true,
+ batch: 1234,
+ headers: [["x-if-unmodified-since", time0]],
+ },
+ {
+ nbytes: requestBytesFor([20, 20]),
+ payloadBytes: 40,
+ numRecords: 2,
+ commit: false,
+ batch: "true",
+ headers: [["x-if-unmodified-since", time1]],
+ },
+ {
+ nbytes: requestBytesFor([20]),
+ payloadBytes: 20,
+ numRecords: 1,
+ commit: true,
+ batch: 5678,
+ headers: [["x-if-unmodified-since", time1]],
+ },
+ ]);
+
+ deepEqual(stats.batches, [
+ {
+ posts: 2,
+ payloadBytes: 60,
+ numRecords: 3,
+ didCommit: true,
+ batch: 1234,
+ serverBatch: true,
+ },
+ {
+ posts: 2,
+ payloadBytes: 60,
+ numRecords: 3,
+ didCommit: true,
+ batch: 5678,
+ serverBatch: true,
+ },
+ ]);
+
+ equal(pq.lastModified, time1 + 200);
+});
+
+// Test we split up the posts when we exceed the record limit when batch semantics
+// are in place.
+add_task(async function test_max_post_records_batch() {
+ let config = {
+ max_post_bytes: 1000,
+ max_post_records: 2,
+ max_total_bytes: 5000,
+ max_total_records: 100,
+ max_record_payload_bytes: 1000,
+ max_request_bytes: 1000,
+ };
+
+ const time = 11111111;
+ function* responseGenerator() {
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 1234 },
+ headers: { "x-last-modified": time, "x-weave-timestamp": time + 100 },
+ };
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 1234 },
+ headers: {
+ "x-last-modified": time + 200,
+ "x-weave-timestamp": time + 200,
+ },
+ };
+ }
+
+ let { pq, stats } = makePostQueue(config, time, responseGenerator());
+ ok((await pq.enqueue(makeRecord(20))).enqueued);
+ ok((await pq.enqueue(makeRecord(20))).enqueued);
+
+ // will exceed record limit of 2, so will be in 2nd post.
+ ok((await pq.enqueue(makeRecord(20))).enqueued);
+
+ await pq.flush(true);
+
+ deepEqual(stats.posts, [
+ {
+ nbytes: requestBytesFor([20, 20]),
+ numRecords: 2,
+ payloadBytes: 40,
+ commit: false,
+ batch: "true",
+ headers: [["x-if-unmodified-since", time]],
+ },
+ {
+ nbytes: requestBytesFor([20]),
+ numRecords: 1,
+ payloadBytes: 20,
+ commit: true,
+ batch: 1234,
+ headers: [["x-if-unmodified-since", time]],
+ },
+ ]);
+
+ deepEqual(stats.batches, [
+ {
+ posts: 2,
+ payloadBytes: 60,
+ numRecords: 3,
+ batch: 1234,
+ serverBatch: true,
+ didCommit: true,
+ },
+ ]);
+
+ equal(pq.lastModified, time + 200);
+});
+
+// Test we do the right thing when the batch record limit is exceeded.
+add_task(async function test_max_records_batch() {
+ let config = {
+ max_post_bytes: 1000,
+ max_post_records: 3,
+ max_total_bytes: 10000,
+ max_total_records: 5,
+ max_record_payload_bytes: 1000,
+ max_request_bytes: 10000,
+ };
+
+ const time0 = 11111111;
+ const time1 = 22222222;
+ function* responseGenerator() {
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 1234 },
+ headers: { "x-last-modified": time0, "x-weave-timestamp": time0 + 100 },
+ };
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 1234 },
+ headers: { "x-last-modified": time1, "x-weave-timestamp": time1 },
+ };
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 5678 },
+ headers: { "x-last-modified": time1, "x-weave-timestamp": time1 + 100 },
+ };
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 5678 },
+ headers: {
+ "x-last-modified": time1 + 200,
+ "x-weave-timestamp": time1 + 200,
+ },
+ };
+ }
+
+ let { pq, stats } = makePostQueue(config, time0, responseGenerator());
+
+ ok((await pq.enqueue(makeRecord(20))).enqueued);
+ ok((await pq.enqueue(makeRecord(20))).enqueued);
+ ok((await pq.enqueue(makeRecord(20))).enqueued);
+
+ ok((await pq.enqueue(makeRecord(20))).enqueued);
+ ok((await pq.enqueue(makeRecord(20))).enqueued);
+
+ ok((await pq.enqueue(makeRecord(20))).enqueued);
+ ok((await pq.enqueue(makeRecord(20))).enqueued);
+ ok((await pq.enqueue(makeRecord(20))).enqueued);
+
+ ok((await pq.enqueue(makeRecord(20))).enqueued);
+
+ await pq.flush(true);
+
+ deepEqual(stats.posts, [
+ {
+ // 3 records
+ nbytes: requestBytesFor([20, 20, 20]),
+ payloadBytes: 60,
+ numRecords: 3,
+ commit: false,
+ batch: "true",
+ headers: [["x-if-unmodified-since", time0]],
+ },
+ {
+ // 2 records -- end batch1
+ nbytes: requestBytesFor([20, 20]),
+ payloadBytes: 40,
+ numRecords: 2,
+ commit: true,
+ batch: 1234,
+ headers: [["x-if-unmodified-since", time0]],
+ },
+ {
+ // 3 records
+ nbytes: requestBytesFor([20, 20, 20]),
+ payloadBytes: 60,
+ numRecords: 3,
+ commit: false,
+ batch: "true",
+ headers: [["x-if-unmodified-since", time1]],
+ },
+ {
+ // 1 record -- end batch2
+ nbytes: requestBytesFor([20]),
+ payloadBytes: 20,
+ numRecords: 1,
+ commit: true,
+ batch: 5678,
+ headers: [["x-if-unmodified-since", time1]],
+ },
+ ]);
+
+ deepEqual(stats.batches, [
+ {
+ posts: 2,
+ payloadBytes: 100,
+ numRecords: 5,
+ batch: 1234,
+ serverBatch: true,
+ didCommit: true,
+ },
+ {
+ posts: 2,
+ payloadBytes: 80,
+ numRecords: 4,
+ batch: 5678,
+ serverBatch: true,
+ didCommit: true,
+ },
+ ]);
+
+ equal(pq.lastModified, time1 + 200);
+});
+
+// Test we do the right thing when the limits are met but not exceeded.
+add_task(async function test_packed_batch() {
+ let config = {
+ max_post_bytes: 41,
+ max_post_records: 4,
+
+ max_total_bytes: 81,
+ max_total_records: 8,
+
+ max_record_payload_bytes: 20 + makeRecord.nonPayloadOverhead + 1,
+ max_request_bytes: requestBytesFor([10, 10, 10, 10]) + 1,
+ };
+
+ const time = 11111111;
+ function* responseGenerator() {
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 1234 },
+ headers: { "x-last-modified": time, "x-weave-timestamp": time + 100 },
+ };
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 1234 },
+ headers: {
+ "x-last-modified": time + 200,
+ "x-weave-timestamp": time + 200,
+ },
+ };
+ }
+
+ let { pq, stats } = makePostQueue(config, time, responseGenerator());
+ ok((await pq.enqueue(makeRecord(10))).enqueued);
+ ok((await pq.enqueue(makeRecord(10))).enqueued);
+ ok((await pq.enqueue(makeRecord(10))).enqueued);
+ ok((await pq.enqueue(makeRecord(10))).enqueued);
+
+ ok((await pq.enqueue(makeRecord(10))).enqueued);
+ ok((await pq.enqueue(makeRecord(10))).enqueued);
+ ok((await pq.enqueue(makeRecord(10))).enqueued);
+ ok((await pq.enqueue(makeRecord(10))).enqueued);
+
+ await pq.flush(true);
+
+ deepEqual(stats.posts, [
+ {
+ nbytes: requestBytesFor([10, 10, 10, 10]),
+ numRecords: 4,
+ payloadBytes: 40,
+ commit: false,
+ batch: "true",
+ headers: [["x-if-unmodified-since", time]],
+ },
+ {
+ nbytes: requestBytesFor([10, 10, 10, 10]),
+ numRecords: 4,
+ payloadBytes: 40,
+ commit: true,
+ batch: 1234,
+ headers: [["x-if-unmodified-since", time]],
+ },
+ ]);
+
+ deepEqual(stats.batches, [
+ {
+ posts: 2,
+ payloadBytes: 80,
+ numRecords: 8,
+ batch: 1234,
+ serverBatch: true,
+ didCommit: true,
+ },
+ ]);
+
+ equal(pq.lastModified, time + 200);
+});
+
+// Checks that a single record over the failure limit fails to enqueue under the provided config.
+async function test_enqueue_failure_case(failureLimit, config) {
+ const time = 11111111;
+ function* responseGenerator() {
+ yield {
+ success: true,
+ status: 202,
+ obj: { batch: 1234 },
+ headers: {
+ "x-last-modified": time + 100,
+ "x-weave-timestamp": time + 100,
+ },
+ };
+ }
+
+ let { pq, stats } = makePostQueue(config, time, responseGenerator());
+ // Check on empty postqueue
+ let result = await pq.enqueue(makeRecord(failureLimit + 1));
+ ok(!result.enqueued);
+ notEqual(result.error, undefined);
+
+ ok((await pq.enqueue(makeRecord(5))).enqueued);
+
+ // check on nonempty postqueue
+ result = await pq.enqueue(makeRecord(failureLimit + 1));
+ ok(!result.enqueued);
+ notEqual(result.error, undefined);
+
+ // make sure that we keep working, skipping the bad record entirely
+  // (handling the error the queue reported is left up to the caller)
+ ok((await pq.enqueue(makeRecord(5))).enqueued);
+
+ await pq.flush(true);
+
+ deepEqual(stats.posts, [
+ {
+ nbytes: requestBytesFor([5, 5]),
+ numRecords: 2,
+ payloadBytes: 10,
+ commit: true,
+ batch: "true",
+ headers: [["x-if-unmodified-since", time]],
+ },
+ ]);
+
+ deepEqual(stats.batches, [
+ {
+ posts: 1,
+ payloadBytes: 10,
+ numRecords: 2,
+ batch: 1234,
+ serverBatch: true,
+ didCommit: true,
+ },
+ ]);
+
+ equal(pq.lastModified, time + 100);
+}
+
+add_task(async function test_max_post_bytes_enqueue_failure() {
+ await test_enqueue_failure_case(50, {
+ max_post_bytes: 50,
+ max_post_records: 100,
+
+ max_total_bytes: 5000,
+ max_total_records: 100,
+
+ max_record_payload_bytes: 500,
+ max_request_bytes: 500,
+ });
+});
+
+add_task(async function test_max_request_bytes_enqueue_failure() {
+ await test_enqueue_failure_case(50, {
+ max_post_bytes: 500,
+ max_post_records: 100,
+
+ max_total_bytes: 5000,
+ max_total_records: 100,
+
+ max_record_payload_bytes: 500,
+ max_request_bytes: 50,
+ });
+});
+
+add_task(async function test_max_record_payload_bytes_enqueue_failure() {
+ await test_enqueue_failure_case(50, {
+ max_post_bytes: 500,
+ max_post_records: 100,
+
+ max_total_bytes: 5000,
+ max_total_records: 100,
+
+ max_record_payload_bytes: 50,
+ max_request_bytes: 500,
+ });
+});
diff --git a/services/sync/tests/unit/test_prefs_engine.js b/services/sync/tests/unit/test_prefs_engine.js
new file mode 100644
index 0000000000..77a4474cb4
--- /dev/null
+++ b/services/sync/tests/unit/test_prefs_engine.js
@@ -0,0 +1,134 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { getPrefsGUIDForTest } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/prefs.sys.mjs"
+);
+const PREFS_GUID = getPrefsGUIDForTest();
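+// The prefs engine stores every synced pref in a single record whose GUID
+// is derived from the app ID (see test_prefs_store.js).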
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+async function cleanup(engine, server) {
+ await engine._tracker.stop();
+ await engine.wipeClient();
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ Service.recordManager.clearCache();
+ await promiseStopServer(server);
+}
+
+add_task(async function test_modified_after_fail() {
+ let engine = Service.engineManager.get("prefs");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ try {
+ // The homepage pref is synced by default.
+ _("Set homepage before first sync");
+ Services.prefs.setStringPref("browser.startup.homepage", "about:welcome");
+
+ _("First sync; create collection and pref record on server");
+ await sync_engine_and_validate_telem(engine, false);
+
+ let collection = server.user("foo").collection("prefs");
+ equal(
+ collection.cleartext(PREFS_GUID).value["browser.startup.homepage"],
+ "about:welcome",
+ "Should upload homepage in pref record"
+ );
+ ok(
+ !engine._tracker.modified,
+ "Tracker shouldn't be modified after first sync"
+ );
+
+ // Our tracker only has a `modified` flag that's reset after a
+ // successful upload. Force it to remain set by failing the
+ // upload.
+ _("Second sync; flag tracker as modified and throw on upload");
+ Services.prefs.setStringPref("browser.startup.homepage", "about:robots");
+ engine._tracker.modified = true;
+ let oldPost = collection.post;
+ collection.post = () => {
+ throw new Error("Sync this!");
+ };
+ await Assert.rejects(
+ sync_engine_and_validate_telem(engine, true),
+ ex => ex.success === false
+ );
+ ok(
+ engine._tracker.modified,
+ "Tracker should remain modified after failed sync"
+ );
+
+ _("Third sync");
+ collection.post = oldPost;
+ await sync_engine_and_validate_telem(engine, false);
+ equal(
+ collection.cleartext(PREFS_GUID).value["browser.startup.homepage"],
+ "about:robots",
+ "Should upload new homepage on third sync"
+ );
+ ok(
+ !engine._tracker.modified,
+ "Tracker shouldn't be modified again after third sync"
+ );
+ } finally {
+ await cleanup(engine, server);
+ }
+});
+
+add_task(async function test_allow_arbitrary() {
+ let engine = Service.engineManager.get("prefs");
+
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ try {
+ _("Create collection and pref record on server");
+ await sync_engine_and_validate_telem(engine, false);
+
+ let collection = server.user("foo").collection("prefs");
+
+ _("Insert arbitrary pref into remote record");
+ let cleartext1 = collection.cleartext(PREFS_GUID);
+ cleartext1.value.let_viruses_take_over = true;
+ collection.insert(
+ PREFS_GUID,
+ encryptPayload(cleartext1),
+ new_timestamp() + 5
+ );
+
+ _("Sync again; client shouldn't allow pref");
+ await sync_engine_and_validate_telem(engine, false);
+ ok(
+ !Services.prefs.getBoolPref("let_viruses_take_over", false),
+ "Shouldn't allow arbitrary remote prefs without control pref"
+ );
+
+ _("Sync with control pref set; client should set new pref");
+ Services.prefs.setBoolPref(
+ "services.sync.prefs.sync.let_viruses_take_over_take_two",
+ true
+ );
+
+ let cleartext2 = collection.cleartext(PREFS_GUID);
+ cleartext2.value.let_viruses_take_over_take_two = true;
+ collection.insert(
+ PREFS_GUID,
+ encryptPayload(cleartext2),
+ new_timestamp() + 5
+ );
+ // Reset the last sync time so that the engine fetches the record again.
+ await engine.setLastSync(0);
+ await sync_engine_and_validate_telem(engine, false);
+ ok(
+ Services.prefs.getBoolPref("let_viruses_take_over_take_two"),
+ "Should set arbitrary remote pref with control pref"
+ );
+ } finally {
+ await cleanup(engine, server);
+ }
+});
diff --git a/services/sync/tests/unit/test_prefs_store.js b/services/sync/tests/unit/test_prefs_store.js
new file mode 100644
index 0000000000..53ee68fc95
--- /dev/null
+++ b/services/sync/tests/unit/test_prefs_store.js
@@ -0,0 +1,391 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { PromiseTestUtils } = ChromeUtils.importESModule(
+ "resource://testing-common/PromiseTestUtils.sys.mjs"
+);
+PromiseTestUtils.allowMatchingRejectionsGlobally(
+ /Unable to arm timer, the object has been finalized\./
+);
+PromiseTestUtils.allowMatchingRejectionsGlobally(
+ /IOUtils\.profileBeforeChange getter: IOUtils: profileBeforeChange phase has already finished/
+);
+
+const { PrefRec, getPrefsGUIDForTest } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/prefs.sys.mjs"
+);
+const PREFS_GUID = getPrefsGUIDForTest();
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+const DEFAULT_THEME_ID = "default-theme@mozilla.org";
+const COMPACT_THEME_ID = "firefox-compact-light@mozilla.org";
+
+AddonTestUtils.init(this);
+AddonTestUtils.createAppInfo(
+ "xpcshell@tests.mozilla.org",
+ "XPCShell",
+ "1",
+ "1.9.2"
+);
+AddonTestUtils.overrideCertDB();
+
+add_task(async function run_test() {
+ _("Test fixtures.");
+ // Part of this test ensures the default theme, via the preference
+ // extensions.activeThemeID, is synced correctly - so we do a little
+ // addons initialization to allow this to work.
+
+  // Enable application scopes to ensure the builtin theme is going to
+  // be installed as part of the addon manager startup.
+ Services.prefs.setIntPref(
+ "extensions.enabledScopes",
+ AddonManager.SCOPE_APPLICATION
+ );
+ await AddonTestUtils.promiseStartupManager();
+
+ // Install another built-in theme.
+ await AddonManager.installBuiltinAddon("resource://builtin-themes/light/");
+
+ const defaultThemeAddon = await AddonManager.getAddonByID(DEFAULT_THEME_ID);
+ ok(defaultThemeAddon, "Got an addon wrapper for the default theme");
+
+ const otherThemeAddon = await AddonManager.getAddonByID(COMPACT_THEME_ID);
+ ok(otherThemeAddon, "Got an addon wrapper for the compact theme");
+
+ await otherThemeAddon.enable();
+
+ // read our custom prefs file before doing anything.
+ Services.prefs.readDefaultPrefsFromFile(
+ do_get_file("prefs_test_prefs_store.js")
+ );
+
+ let engine = Service.engineManager.get("prefs");
+ let store = engine._store;
+ try {
+ _("Expect the compact light theme to be active");
+ Assert.strictEqual(
+ Services.prefs.getStringPref("extensions.activeThemeID"),
+ COMPACT_THEME_ID
+ );
+
+ _("The GUID corresponds to XUL App ID.");
+ let allIDs = await store.getAllIDs();
+ let ids = Object.keys(allIDs);
+ Assert.equal(ids.length, 1);
+ Assert.equal(ids[0], PREFS_GUID);
+ Assert.ok(allIDs[PREFS_GUID]);
+
+ Assert.ok(await store.itemExists(PREFS_GUID));
+ Assert.equal(false, await store.itemExists("random-gibberish"));
+
+ _("Unknown prefs record is created as deleted.");
+ let record = await store.createRecord("random-gibberish", "prefs");
+ Assert.ok(record.deleted);
+
+ _("Prefs record contains only prefs that should be synced.");
+ record = await store.createRecord(PREFS_GUID, "prefs");
+ Assert.strictEqual(record.value["testing.int"], 123);
+ Assert.strictEqual(record.value["testing.string"], "ohai");
+ Assert.strictEqual(record.value["testing.bool"], true);
+ // non-existing prefs get null as the value
+ Assert.strictEqual(record.value["testing.nonexistent"], null);
+ // as do prefs that have a default value.
+ Assert.strictEqual(record.value["testing.default"], null);
+ Assert.strictEqual(record.value["testing.turned.off"], undefined);
+ Assert.strictEqual(record.value["testing.not.turned.on"], undefined);
+
+ _("Prefs record contains the correct control prefs.");
+ // All control prefs which have the default value and where the pref
+ // itself is synced should appear, but with null as the value.
+ Assert.strictEqual(
+ record.value["services.sync.prefs.sync.testing.int"],
+ null
+ );
+ Assert.strictEqual(
+ record.value["services.sync.prefs.sync.testing.string"],
+ null
+ );
+ Assert.strictEqual(
+ record.value["services.sync.prefs.sync.testing.bool"],
+ null
+ );
+ Assert.strictEqual(
+ record.value["services.sync.prefs.sync.testing.dont.change"],
+ null
+ );
+ Assert.strictEqual(
+ record.value["services.sync.prefs.sync.testing.nonexistent"],
+ null
+ );
+ Assert.strictEqual(
+ record.value["services.sync.prefs.sync.testing.default"],
+ null
+ );
+
+ // but this control pref has a non-default value so that value is synced.
+ Assert.strictEqual(
+ record.value["services.sync.prefs.sync.testing.turned.off"],
+ false
+ );
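+
+    // (A sketch of the convention exercised above, as this test understands
+    // it: each synced pref `some.pref` is gated by a control pref named
+    // `services.sync.prefs.sync.some.pref`. Control prefs still at their
+    // default value serialize as null, while ones with a user value - like
+    // testing.turned.off above - serialize as that actual boolean.)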
+
+ _("Unsyncable prefs are treated correctly.");
+ // Prefs we consider unsyncable (since they are URLs that won't be stable on
+ // another firefox) shouldn't be included - neither the value nor the
+ // control pref should appear.
+ Assert.strictEqual(record.value["testing.unsynced.url"], undefined);
+ Assert.strictEqual(
+ record.value["services.sync.prefs.sync.testing.unsynced.url"],
+ undefined
+ );
+ // Other URLs with user prefs should be synced, though.
+ Assert.strictEqual(
+ record.value["testing.synced.url"],
+ "https://www.example.com"
+ );
+ Assert.strictEqual(
+ record.value["services.sync.prefs.sync.testing.synced.url"],
+ null
+ );
+
+ _("Update some prefs, including one that's to be reset/deleted.");
+ // This pref is not going to be reset or deleted as there's no "control pref"
+ // in either the incoming record or locally.
+ Services.prefs.setStringPref(
+ "testing.deleted-without-control-pref",
+ "I'm deleted-without-control-pref"
+ );
+ // Another pref with only a local control pref.
+ Services.prefs.setStringPref(
+ "testing.deleted-with-local-control-pref",
+ "I'm deleted-with-local-control-pref"
+ );
+ Services.prefs.setBoolPref(
+ "services.sync.prefs.sync.testing.deleted-with-local-control-pref",
+ true
+ );
+ // And a pref without a local control pref but one that's incoming.
+ Services.prefs.setStringPref(
+ "testing.deleted-with-incoming-control-pref",
+ "I'm deleted-with-incoming-control-pref"
+ );
+ record = new PrefRec("prefs", PREFS_GUID);
+ record.value = {
+ "extensions.activeThemeID": DEFAULT_THEME_ID,
+ "testing.int": 42,
+ "testing.string": "im in ur prefs",
+ "testing.bool": false,
+ "testing.deleted-without-control-pref": null,
+ "testing.deleted-with-local-control-pref": null,
+ "testing.deleted-with-incoming-control-pref": null,
+ "services.sync.prefs.sync.testing.deleted-with-incoming-control-pref": true,
+ "testing.somepref": "im a new pref from other device",
+ "services.sync.prefs.sync.testing.somepref": true,
+      // Pretend a stale remote client is overwriting it with a value
+ // we consider unsyncable.
+ "testing.synced.url": "blob:ebeb707a-502e-40c6-97a5-dd4bda901463",
+ // Make sure we can replace the unsynced URL with a valid URL.
+ "testing.unsynced.url": "https://www.example.com/2",
+ // Make sure our "master control pref" is ignored.
+ "services.sync.prefs.dangerously_allow_arbitrary": true,
+ "services.sync.prefs.sync.services.sync.prefs.dangerously_allow_arbitrary": true,
+ };
+
+ const onceAddonEnabled = AddonTestUtils.promiseAddonEvent("onEnabled");
+
+ await store.update(record);
+ Assert.strictEqual(Services.prefs.getIntPref("testing.int"), 42);
+ Assert.strictEqual(
+ Services.prefs.getStringPref("testing.string"),
+ "im in ur prefs"
+ );
+ Assert.strictEqual(Services.prefs.getBoolPref("testing.bool"), false);
+ Assert.strictEqual(
+ Services.prefs.getStringPref("testing.deleted-without-control-pref"),
+ "I'm deleted-without-control-pref"
+ );
+ Assert.strictEqual(
+ Services.prefs.getPrefType("testing.deleted-with-local-control-pref"),
+ Ci.nsIPrefBranch.PREF_INVALID
+ );
+ Assert.strictEqual(
+ Services.prefs.getStringPref(
+ "testing.deleted-with-incoming-control-pref"
+ ),
+ "I'm deleted-with-incoming-control-pref"
+ );
+ Assert.strictEqual(
+ Services.prefs.getStringPref("testing.dont.change"),
+ "Please don't change me."
+ );
+ Assert.strictEqual(
+ Services.prefs.getPrefType("testing.somepref"),
+ Ci.nsIPrefBranch.PREF_INVALID
+ );
+ Assert.strictEqual(
+ Services.prefs.getStringPref("testing.synced.url"),
+ "https://www.example.com"
+ );
+ Assert.strictEqual(
+ Services.prefs.getStringPref("testing.unsynced.url"),
+ "https://www.example.com/2"
+ );
+ Assert.strictEqual(
+ Svc.PrefBranch.getPrefType("prefs.sync.testing.somepref"),
+ Ci.nsIPrefBranch.PREF_INVALID
+ );
+ Assert.strictEqual(
+ Services.prefs.getBoolPref(
+ "services.sync.prefs.dangerously_allow_arbitrary"
+ ),
+ false
+ );
+ Assert.strictEqual(
+ Services.prefs.getPrefType(
+ "services.sync.prefs.sync.services.sync.prefs.dangerously_allow_arbitrary"
+ ),
+ Ci.nsIPrefBranch.PREF_INVALID
+ );
+
+ await onceAddonEnabled;
+ ok(
+ !defaultThemeAddon.userDisabled,
+ "the default theme should have been enabled"
+ );
+ ok(
+ otherThemeAddon.userDisabled,
+ "the compact theme should have been disabled"
+ );
+
+ _("Only the current app's preferences are applied.");
+ record = new PrefRec("prefs", "some-fake-app");
+ record.value = {
+ "testing.int": 98,
+ };
+ await store.update(record);
+ Assert.equal(Services.prefs.getIntPref("testing.int"), 42);
+ } finally {
+ for (const pref of Services.prefs.getChildList("")) {
+ Services.prefs.clearUserPref(pref);
+ }
+ }
+});
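+
+// update() semantics demonstrated above: incoming values and deletions are
+// only applied to prefs whose *local* control pref allows syncing - control
+// prefs arriving in the record itself are ignored - incoming values we deem
+// unsyncable (like blob: URLs) are dropped, and records whose GUID doesn't
+// match this app's PREFS_GUID aren't applied at all.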
+
+add_task(async function test_dangerously_allow() {
+ _("services.sync.prefs.dangerously_allow_arbitrary");
+ // Bug 1538015 added a capability to "dangerously allow" arbitrary prefs.
+  // Bug 1854698 removed that capability, but we still never sync the pref
+  // which enabled the "dangerous" behaviour, just in case someone tries to
+  // sync it back to a profile which *does* support that pref.
+ Services.prefs.readDefaultPrefsFromFile(
+ do_get_file("prefs_test_prefs_store.js")
+ );
+
+ let engine = Service.engineManager.get("prefs");
+ let store = engine._store;
+ try {
+ // an incoming record with our old "dangerous" pref.
+ let record = new PrefRec("prefs", PREFS_GUID);
+ record.value = {
+ "services.sync.prefs.dangerously_allow_arbitrary": true,
+ "services.sync.prefs.sync.services.sync.prefs.dangerously_allow_arbitrary": true,
+ };
+ await store.update(record);
+ Assert.strictEqual(
+ Services.prefs.getBoolPref(
+ "services.sync.prefs.dangerously_allow_arbitrary"
+ ),
+ false
+ );
+ Assert.strictEqual(
+ Services.prefs.getPrefType(
+ "services.sync.prefs.sync.services.sync.prefs.dangerously_allow_arbitrary"
+ ),
+ Ci.nsIPrefBranch.PREF_INVALID
+ );
+ } finally {
+ for (const pref of Services.prefs.getChildList("")) {
+ Services.prefs.clearUserPref(pref);
+ }
+ }
+});
+
+add_task(async function test_incoming_sets_seen() {
+ _("Test the sync-seen allow-list");
+
+ let engine = Service.engineManager.get("prefs");
+ let store = engine._store;
+
+ Services.prefs.readDefaultPrefsFromFile(
+ do_get_file("prefs_test_prefs_store.js")
+ );
+ const defaultValue = "the value";
+ Assert.equal(Services.prefs.getStringPref("testing.seen"), defaultValue);
+
+ let record = await store.createRecord(PREFS_GUID, "prefs");
+ // Haven't seen a non-default value before, so remains null.
+ Assert.strictEqual(record.value["testing.seen"], null);
+
+  // Pretend there's an incoming record with the default value - it might not
+  // be the default everywhere, so we treat it specially.
+ record = new PrefRec("prefs", PREFS_GUID);
+ record.value = {
+ "testing.seen": defaultValue,
+ };
+ await store.update(record);
+ // Our special control value should now be set.
+ Assert.strictEqual(
+ Services.prefs.getBoolPref("services.sync.prefs.sync-seen.testing.seen"),
+ true
+ );
+  // It's still the default value, so the value is not considered changed.
+ Assert.equal(Services.prefs.prefHasUserValue("testing.seen"), false);
+
+ // But now that special control value is set, the record always contains the value.
+ record = await store.createRecord(PREFS_GUID, "prefs");
+ Assert.strictEqual(record.value["testing.seen"], defaultValue);
+});
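+
+// A rough summary of the "sync-seen" behaviour exercised above (inferred from
+// this test rather than a spec): a pref sitting at its default value syncs as
+// null until an incoming record supplies a value for it; at that point
+// services.sync.prefs.sync-seen.<pref> is flipped to true, and from then on
+// outgoing records always carry the concrete value, even when it still equals
+// the default.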
+
+add_task(async function test_outgoing_when_changed() {
+ _("Test the 'seen' pref is set first sync of non-default value");
+
+ let engine = Service.engineManager.get("prefs");
+ let store = engine._store;
+ for (const pref of Services.prefs.getChildList("")) {
+ Services.prefs.clearUserPref(pref);
+ }
+
+ Services.prefs.readDefaultPrefsFromFile(
+ do_get_file("prefs_test_prefs_store.js")
+ );
+ const defaultValue = "the value";
+ Assert.equal(Services.prefs.getStringPref("testing.seen"), defaultValue);
+
+ let record = await store.createRecord(PREFS_GUID, "prefs");
+ // Haven't seen a non-default value before, so remains null.
+ Assert.strictEqual(record.value["testing.seen"], null);
+
+ // Change the value.
+ Services.prefs.setStringPref("testing.seen", "new value");
+ record = await store.createRecord(PREFS_GUID, "prefs");
+ // creating the record toggled that "seen" pref.
+ Assert.strictEqual(
+ Services.prefs.getBoolPref("services.sync.prefs.sync-seen.testing.seen"),
+ true
+ );
+ Assert.strictEqual(Services.prefs.getStringPref("testing.seen"), "new value");
+
+ // Resetting the pref does not change that seen value.
+ Services.prefs.clearUserPref("testing.seen");
+ Assert.strictEqual(
+ Services.prefs.getStringPref("testing.seen"),
+ defaultValue
+ );
+
+ record = await store.createRecord(PREFS_GUID, "prefs");
+ Assert.strictEqual(
+ Services.prefs.getBoolPref("services.sync.prefs.sync-seen.testing.seen"),
+ true
+ );
+});
diff --git a/services/sync/tests/unit/test_prefs_tracker.js b/services/sync/tests/unit/test_prefs_tracker.js
new file mode 100644
index 0000000000..ecf3f18420
--- /dev/null
+++ b/services/sync/tests/unit/test_prefs_tracker.js
@@ -0,0 +1,93 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+add_task(async function run_test() {
+ let engine = Service.engineManager.get("prefs");
+ let tracker = engine._tracker;
+
+ try {
+ _("tracker.modified corresponds to preference.");
+ // Assert preference is not defined.
+ Assert.equal(
+ Svc.PrefBranch.getPrefType("engine.prefs.modified"),
+ Ci.nsIPrefBranch.PREF_INVALID
+ );
+ Assert.ok(!tracker.modified);
+
+ tracker.modified = true;
+ Assert.equal(Svc.PrefBranch.getBoolPref("engine.prefs.modified"), true);
+ Assert.ok(tracker.modified);
+
+    _("Engine's getChangedIDs() just returns the one GUID we have.");
+ let changedIDs = await engine.getChangedIDs();
+ let ids = Object.keys(changedIDs);
+ Assert.equal(ids.length, 1);
+ Assert.equal(ids[0], CommonUtils.encodeBase64URL(Services.appinfo.ID));
+
+ Svc.PrefBranch.setBoolPref("engine.prefs.modified", false);
+ Assert.ok(!tracker.modified);
+
+ _("No modified state, so no changed IDs.");
+ do_check_empty(await engine.getChangedIDs());
+
+ _("Initial score is 0");
+ Assert.equal(tracker.score, 0);
+
+ _("Test fixtures.");
+ Svc.PrefBranch.setBoolPref("prefs.sync.testing.int", true);
+
+    _(
+      "Test fixtures haven't upped the tracker score because tracking hasn't started yet."
+    );
+ Assert.equal(tracker.score, 0);
+
+ _("Tell the tracker to start tracking changes.");
+ tracker.start();
+ Services.prefs.setIntPref("testing.int", 23);
+ await tracker.asyncObserver.promiseObserversComplete();
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE);
+ Assert.equal(tracker.modified, true);
+
+    _("Clearing changed IDs resets modified status.");
+ await tracker.clearChangedIDs();
+ Assert.equal(tracker.modified, false);
+
+ _("Resetting a pref ups the score, too.");
+ Services.prefs.clearUserPref("testing.int");
+ await tracker.asyncObserver.promiseObserversComplete();
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 2);
+ Assert.equal(tracker.modified, true);
+ await tracker.clearChangedIDs();
+
+ _("So does changing a pref sync pref.");
+ Svc.PrefBranch.setBoolPref("prefs.sync.testing.int", false);
+ await tracker.asyncObserver.promiseObserversComplete();
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 3);
+ Assert.equal(tracker.modified, true);
+ await tracker.clearChangedIDs();
+
+ _(
+ "Now that the pref sync pref has been flipped, changes to it won't be picked up."
+ );
+ Services.prefs.setIntPref("testing.int", 42);
+ await tracker.asyncObserver.promiseObserversComplete();
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 3);
+ Assert.equal(tracker.modified, false);
+ await tracker.clearChangedIDs();
+
+ _("Changing some other random pref won't do anything.");
+ Services.prefs.setStringPref("testing.other", "blergh");
+ await tracker.asyncObserver.promiseObserversComplete();
+ Assert.equal(tracker.score, SCORE_INCREMENT_XLARGE * 3);
+ Assert.equal(tracker.modified, false);
+ } finally {
+ await tracker.stop();
+ for (const pref of Services.prefs.getChildList("")) {
+ Services.prefs.clearUserPref(pref);
+ }
+ }
+});
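+
+// Scoring model as exercised above (not an exhaustive description): every
+// change to a tracked pref - setting it, clearing it, or flipping its
+// "prefs.sync." control pref - bumps the tracker score by
+// SCORE_INCREMENT_XLARGE and marks the tracker modified, while changes to
+// untracked prefs leave both untouched.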
diff --git a/services/sync/tests/unit/test_records_crypto.js b/services/sync/tests/unit/test_records_crypto.js
new file mode 100644
index 0000000000..8b841653cd
--- /dev/null
+++ b/services/sync/tests/unit/test_records_crypto.js
@@ -0,0 +1,189 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { CollectionKeyManager, CryptoWrapper } = ChromeUtils.importESModule(
+ "resource://services-sync/record.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+var cryptoWrap;
+
+function crypted_resource_handler(metadata, response) {
+ let obj = {
+ id: "resource",
+ modified: cryptoWrap.modified,
+ payload: JSON.stringify(cryptoWrap.payload),
+ };
+ return httpd_basic_auth_handler(JSON.stringify(obj), metadata, response);
+}
+
+function prepareCryptoWrap(collection, id) {
+ let w = new CryptoWrapper();
+ w.cleartext.stuff = "my payload here";
+ w.collection = collection;
+ w.id = id;
+ return w;
+}
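+
+// The encrypt/decrypt round trip these tests lean on, in sketch form (only
+// the CryptoWrapper surface actually used below):
+//
+//   let w = prepareCryptoWrap("steam", "resource");
+//   await w.encrypt(keyBundle);                  // populates ciphertext, IV, hmac
+//   let cleartext = await w.decrypt(keyBundle);  // verifies HMAC and record id
+//
+// decrypt() consumes the ciphertext, which is why a second decrypt throws
+// "No ciphertext: nothing to decrypt?".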
+
+add_task(async function test_records_crypto() {
+ let server;
+
+ await configureIdentity({ username: "john@example.com" });
+ let keyBundle = Service.identity.syncKeyBundle;
+
+ try {
+ let log = Log.repository.getLogger("Test");
+ Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+
+ log.info("Setting up server and authenticator");
+
+ server = httpd_setup({ "/steam/resource": crypted_resource_handler });
+
+ log.info("Creating a record");
+
+ cryptoWrap = prepareCryptoWrap("steam", "resource");
+
+ log.info("cryptoWrap: " + cryptoWrap.toString());
+
+ log.info("Encrypting a record");
+
+ await cryptoWrap.encrypt(keyBundle);
+ log.info("Ciphertext is " + cryptoWrap.ciphertext);
+ Assert.ok(cryptoWrap.ciphertext != null);
+
+ let firstIV = cryptoWrap.IV;
+
+ log.info("Decrypting the record");
+
+ let payload = await cryptoWrap.decrypt(keyBundle);
+ Assert.equal(payload.stuff, "my payload here");
+ Assert.notEqual(payload, cryptoWrap.payload); // wrap.data.payload is the encrypted one
+
+ log.info("Make sure multiple decrypts cause failures");
+ let error = "";
+ try {
+ payload = await cryptoWrap.decrypt(keyBundle);
+ } catch (ex) {
+ error = ex;
+ }
+ Assert.equal(error.message, "No ciphertext: nothing to decrypt?");
+
+ log.info("Re-encrypting the record with alternate payload");
+
+ cryptoWrap.cleartext.stuff = "another payload";
+ await cryptoWrap.encrypt(keyBundle);
+ let secondIV = cryptoWrap.IV;
+ payload = await cryptoWrap.decrypt(keyBundle);
+ Assert.equal(payload.stuff, "another payload");
+
+ log.info("Make sure multiple encrypts use different IVs");
+ Assert.notEqual(firstIV, secondIV);
+
+    log.info("Make sure differing ids cause failures");
+ await cryptoWrap.encrypt(keyBundle);
+ cryptoWrap.data.id = "other";
+ error = "";
+ try {
+ await cryptoWrap.decrypt(keyBundle);
+ } catch (ex) {
+ error = ex;
+ }
+ Assert.equal(error.message, "Record id mismatch: resource != other");
+
+ log.info("Make sure wrong hmacs cause failures");
+ await cryptoWrap.encrypt(keyBundle);
+ cryptoWrap.hmac = "foo";
+ error = "";
+ try {
+ await cryptoWrap.decrypt(keyBundle);
+ } catch (ex) {
+ error = ex;
+ }
+ Assert.equal(
+ error.message.substr(0, 42),
+ "Record SHA256 HMAC mismatch: should be foo"
+ );
+
+ // Checking per-collection keys and default key handling.
+
+ await generateNewKeys(Service.collectionKeys);
+ let bookmarkItem = prepareCryptoWrap("bookmarks", "foo");
+ await bookmarkItem.encrypt(
+ Service.collectionKeys.keyForCollection("bookmarks")
+ );
+ log.info("Ciphertext is " + bookmarkItem.ciphertext);
+ Assert.ok(bookmarkItem.ciphertext != null);
+ log.info("Decrypting the record explicitly with the default key.");
+ Assert.equal(
+ (await bookmarkItem.decrypt(Service.collectionKeys._default)).stuff,
+ "my payload here"
+ );
+
+ // Per-collection keys.
+ // Generate a key for "bookmarks".
+ await generateNewKeys(Service.collectionKeys, ["bookmarks"]);
+ bookmarkItem = prepareCryptoWrap("bookmarks", "foo");
+ Assert.equal(bookmarkItem.collection, "bookmarks");
+
+ // Encrypt. This'll use the "bookmarks" encryption key, because we have a
+ // special key for it. The same key will need to be used for decryption.
+ await bookmarkItem.encrypt(
+ Service.collectionKeys.keyForCollection("bookmarks")
+ );
+ Assert.ok(bookmarkItem.ciphertext != null);
+
+ // Attempt to use the default key, because this is a collision that could
+ // conceivably occur in the real world. Decryption will error, because
+ // it's not the bookmarks key.
+ let err;
+ try {
+ await bookmarkItem.decrypt(Service.collectionKeys._default);
+ } catch (ex) {
+ err = ex;
+ }
+ Assert.equal("Record SHA256 HMAC mismatch", err.message.substr(0, 27));
+
+ // Explicitly check that it's using the bookmarks key.
+ // This should succeed.
+ Assert.equal(
+ (
+ await bookmarkItem.decrypt(
+ Service.collectionKeys.keyForCollection("bookmarks")
+ )
+ ).stuff,
+ "my payload here"
+ );
+
+ Assert.ok(Service.collectionKeys.hasKeysFor(["bookmarks"]));
+
+ // Add a key for some new collection and verify that it isn't the
+ // default key.
+ Assert.ok(!Service.collectionKeys.hasKeysFor(["forms"]));
+ Assert.ok(!Service.collectionKeys.hasKeysFor(["bookmarks", "forms"]));
+ let oldFormsKey = Service.collectionKeys.keyForCollection("forms");
+ Assert.equal(oldFormsKey, Service.collectionKeys._default);
+ let newKeys = await Service.collectionKeys.ensureKeysFor(["forms"]);
+ Assert.ok(newKeys.hasKeysFor(["forms"]));
+ Assert.ok(newKeys.hasKeysFor(["bookmarks", "forms"]));
+ let newFormsKey = newKeys.keyForCollection("forms");
+ Assert.notEqual(newFormsKey, oldFormsKey);
+
+ // Verify that this doesn't overwrite keys
+ let regetKeys = await newKeys.ensureKeysFor(["forms"]);
+ Assert.equal(regetKeys.keyForCollection("forms"), newFormsKey);
+
+ const emptyKeys = new CollectionKeyManager();
+ payload = {
+ default: Service.collectionKeys._default.keyPairB64,
+ collections: {},
+ };
+ // Verify that not passing `modified` doesn't throw
+ emptyKeys.setContents(payload, null);
+
+ log.info("Done!");
+ } finally {
+ await promiseStopServer(server);
+ }
+});
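+
+// Key-management behaviour exercised above (a summary, not a spec):
+// keyForCollection() falls back to the default key until a per-collection key
+// exists, decrypting with the wrong key surfaces as an HMAC mismatch rather
+// than a distinct error, and ensureKeysFor() both mints missing keys and is
+// idempotent - re-running it for "forms" hands back the existing key.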
diff --git a/services/sync/tests/unit/test_records_wbo.js b/services/sync/tests/unit/test_records_wbo.js
new file mode 100644
index 0000000000..61ba33d749
--- /dev/null
+++ b/services/sync/tests/unit/test_records_wbo.js
@@ -0,0 +1,85 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { WBORecord } = ChromeUtils.importESModule(
+ "resource://services-sync/record.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+add_test(function test_toJSON() {
+ _("Create a record, for now without a TTL.");
+ let wbo = new WBORecord("coll", "a_record");
+ wbo.modified = 12345;
+ wbo.sortindex = 42;
+ wbo.payload = {};
+
+ _(
+ "Verify that the JSON representation contains the WBO properties, but not TTL."
+ );
+ let json = JSON.parse(JSON.stringify(wbo));
+ Assert.equal(json.modified, 12345);
+ Assert.equal(json.sortindex, 42);
+ Assert.equal(json.payload, "{}");
+ Assert.equal(false, "ttl" in json);
+
+ _("Set a TTL, make sure it's present in the JSON representation.");
+ wbo.ttl = 30 * 60;
+ json = JSON.parse(JSON.stringify(wbo));
+ Assert.equal(json.ttl, 30 * 60);
+ run_next_test();
+});
+
+add_task(async function test_fetch() {
+ let record = {
+ id: "asdf-1234-asdf-1234",
+ modified: 2454725.98283,
+ payload: JSON.stringify({ cheese: "roquefort" }),
+ };
+ let record2 = {
+ id: "record2",
+ modified: 2454725.98284,
+ payload: JSON.stringify({ cheese: "gruyere" }),
+ };
+ let coll = [
+ {
+ id: "record2",
+ modified: 2454725.98284,
+ payload: JSON.stringify({ cheese: "gruyere" }),
+ },
+ ];
+
+ _("Setting up server.");
+ let server = httpd_setup({
+ "/record": httpd_handler(200, "OK", JSON.stringify(record)),
+ "/record2": httpd_handler(200, "OK", JSON.stringify(record2)),
+ "/coll": httpd_handler(200, "OK", JSON.stringify(coll)),
+ });
+
+ try {
+ _("Fetching a WBO record");
+ let rec = new WBORecord("coll", "record");
+ await rec.fetch(Service.resource(server.baseURI + "/record"));
+ Assert.equal(rec.id, "asdf-1234-asdf-1234"); // NOT "record"!
+
+ Assert.equal(rec.modified, 2454725.98283);
+ Assert.equal(typeof rec.payload, "object");
+ Assert.equal(rec.payload.cheese, "roquefort");
+
+ _("Fetching a WBO record using the record manager");
+ let rec2 = await Service.recordManager.get(server.baseURI + "/record2");
+ Assert.equal(rec2.id, "record2");
+ Assert.equal(rec2.modified, 2454725.98284);
+ Assert.equal(typeof rec2.payload, "object");
+ Assert.equal(rec2.payload.cheese, "gruyere");
+ Assert.equal(Service.recordManager.response.status, 200);
+
+ // Testing collection extraction.
+ _("Extracting collection.");
+ let rec3 = new WBORecord("tabs", "foo"); // Create through constructor.
+ Assert.equal(rec3.collection, "tabs");
+ } finally {
+ await promiseStopServer(server);
+ }
+});
diff --git a/services/sync/tests/unit/test_resource.js b/services/sync/tests/unit/test_resource.js
new file mode 100644
index 0000000000..5182784639
--- /dev/null
+++ b/services/sync/tests/unit/test_resource.js
@@ -0,0 +1,554 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Observers } = ChromeUtils.importESModule(
+ "resource://services-common/observers.sys.mjs"
+);
+const { Resource } = ChromeUtils.importESModule(
+ "resource://services-sync/resource.sys.mjs"
+);
+const { SyncAuthManager } = ChromeUtils.importESModule(
+ "resource://services-sync/sync_auth.sys.mjs"
+);
+
+var fetched = false;
+function server_open(metadata, response) {
+ let body;
+ if (metadata.method == "GET") {
+ fetched = true;
+ body = "This path exists";
+ response.setStatusLine(metadata.httpVersion, 200, "OK");
+ } else {
+ body = "Wrong request method";
+ response.setStatusLine(metadata.httpVersion, 405, "Method Not Allowed");
+ }
+ response.bodyOutputStream.write(body, body.length);
+}
+
+function server_protected(metadata, response) {
+ let body;
+
+ if (has_hawk_header(metadata)) {
+ body = "This path exists and is protected";
+ response.setStatusLine(metadata.httpVersion, 200, "OK, authorized");
+ response.setHeader("WWW-Authenticate", 'Basic realm="secret"', false);
+ } else {
+ body = "This path exists and is protected - failed";
+ response.setStatusLine(metadata.httpVersion, 401, "Unauthorized");
+ response.setHeader("WWW-Authenticate", 'Basic realm="secret"', false);
+ }
+
+ response.bodyOutputStream.write(body, body.length);
+}
+
+function server_404(metadata, response) {
+ let body = "File not found";
+ response.setStatusLine(metadata.httpVersion, 404, "Not Found");
+ response.bodyOutputStream.write(body, body.length);
+}
+
+var pacFetched = false;
+function server_pac(metadata, response) {
+ _("Invoked PAC handler.");
+ pacFetched = true;
+ let body = 'function FindProxyForURL(url, host) { return "DIRECT"; }';
+ response.setStatusLine(metadata.httpVersion, 200, "OK");
+ response.setHeader(
+ "Content-Type",
+ "application/x-ns-proxy-autoconfig",
+ false
+ );
+ response.bodyOutputStream.write(body, body.length);
+}
+
+var sample_data = {
+ some: "sample_data",
+ injson: "format",
+ number: 42,
+};
+
+function server_upload(metadata, response) {
+ let body;
+
+ let input = readBytesFromInputStream(metadata.bodyInputStream);
+ if (input == JSON.stringify(sample_data)) {
+ body = "Valid data upload via " + metadata.method;
+ response.setStatusLine(metadata.httpVersion, 200, "OK");
+ } else {
+ body = "Invalid data upload via " + metadata.method + ": " + input;
+ response.setStatusLine(metadata.httpVersion, 500, "Internal Server Error");
+ }
+
+ response.bodyOutputStream.write(body, body.length);
+}
+
+function server_delete(metadata, response) {
+ let body;
+ if (metadata.method == "DELETE") {
+ body = "This resource has been deleted";
+ response.setStatusLine(metadata.httpVersion, 200, "OK");
+ } else {
+ body = "Wrong request method";
+ response.setStatusLine(metadata.httpVersion, 405, "Method Not Allowed");
+ }
+ response.bodyOutputStream.write(body, body.length);
+}
+
+function server_json(metadata, response) {
+ let body = JSON.stringify(sample_data);
+ response.setStatusLine(metadata.httpVersion, 200, "OK");
+ response.bodyOutputStream.write(body, body.length);
+}
+
+const TIMESTAMP = 1274380461;
+
+function server_timestamp(metadata, response) {
+ let body = "Thank you for your request";
+ response.setHeader("X-Weave-Timestamp", "" + TIMESTAMP, false);
+ response.setStatusLine(metadata.httpVersion, 200, "OK");
+ response.bodyOutputStream.write(body, body.length);
+}
+
+function server_backoff(metadata, response) {
+ let body = "Hey, back off!";
+ response.setHeader("X-Weave-Backoff", "600", false);
+ response.setStatusLine(metadata.httpVersion, 200, "OK");
+ response.bodyOutputStream.write(body, body.length);
+}
+
+function server_quota_notice(request, response) {
+ let body = "You're approaching quota.";
+ response.setHeader("X-Weave-Quota-Remaining", "1048576", false);
+ response.setStatusLine(request.httpVersion, 200, "OK");
+ response.bodyOutputStream.write(body, body.length);
+}
+
+function server_quota_error(request, response) {
+ let body = "14";
+ response.setHeader("X-Weave-Quota-Remaining", "-1024", false);
+ response.setStatusLine(request.httpVersion, 400, "OK");
+ response.bodyOutputStream.write(body, body.length);
+}
+
+function server_headers(metadata, response) {
+ let ignore_headers = [
+ "host",
+ "user-agent",
+ "accept-language",
+ "accept-encoding",
+ "accept-charset",
+ "keep-alive",
+ "connection",
+ "pragma",
+ "origin",
+ "cache-control",
+ "content-length",
+ ];
+ let headers = metadata.headers;
+ let header_names = [];
+ while (headers.hasMoreElements()) {
+ let header = headers.getNext().toString();
+ if (!ignore_headers.includes(header)) {
+ header_names.push(header);
+ }
+ }
+ header_names = header_names.sort();
+
+ headers = {};
+ for (let header of header_names) {
+ headers[header] = metadata.getHeader(header);
+ }
+ let body = JSON.stringify(headers);
+ response.setStatusLine(metadata.httpVersion, 200, "OK");
+ response.bodyOutputStream.write(body, body.length);
+}
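+
+// server_headers echoes the request's non-boilerplate headers back as a JSON
+// body, which is what lets the Accept/Content-Type default tests further down
+// simply parse the response and assert on individual header values.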
+
+var quotaValue;
+Observers.add("weave:service:quota:remaining", function (subject) {
+ quotaValue = subject;
+});
+
+function run_test() {
+ Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+
+ Svc.PrefBranch.setIntPref("network.numRetries", 1); // speed up test
+ run_next_test();
+}
+
+// This apparently has to come first in order for our PAC URL to be hit.
+// Don't put any other HTTP requests earlier in the file!
+add_task(async function test_proxy_auth_redirect() {
+ _(
+ "Ensure that a proxy auth redirect (which switches out our channel) " +
+ "doesn't break Resource."
+ );
+ let server = httpd_setup({
+ "/open": server_open,
+ "/pac2": server_pac,
+ });
+
+ PACSystemSettings.PACURI = server.baseURI + "/pac2";
+ installFakePAC();
+ let res = new Resource(server.baseURI + "/open");
+ let result = await res.get();
+ Assert.ok(pacFetched);
+ Assert.ok(fetched);
+ Assert.equal("This path exists", result.data);
+ pacFetched = fetched = false;
+ uninstallFakePAC();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_new_channel() {
+ _("Ensure a redirect to a new channel is handled properly.");
+
+ let resourceRequested = false;
+ function resourceHandler(metadata, response) {
+ resourceRequested = true;
+
+ let body = "Test";
+ response.setHeader("Content-Type", "text/plain");
+ response.bodyOutputStream.write(body, body.length);
+ }
+
+ let locationURL;
+ function redirectHandler(metadata, response) {
+ let body = "Redirecting";
+ response.setStatusLine(metadata.httpVersion, 307, "TEMPORARY REDIRECT");
+ response.setHeader("Location", locationURL);
+ response.bodyOutputStream.write(body, body.length);
+ }
+
+ let server = httpd_setup({
+ "/resource": resourceHandler,
+ "/redirect": redirectHandler,
+ });
+ locationURL = server.baseURI + "/resource";
+
+ let request = new Resource(server.baseURI + "/redirect");
+ let content = await request.get();
+ Assert.ok(resourceRequested);
+ Assert.equal(200, content.status);
+ Assert.ok("content-type" in content.headers);
+ Assert.equal("text/plain", content.headers["content-type"]);
+
+ await promiseStopServer(server);
+});
+
+var server;
+
+add_test(function setup() {
+ server = httpd_setup({
+ "/open": server_open,
+ "/protected": server_protected,
+ "/404": server_404,
+ "/upload": server_upload,
+ "/delete": server_delete,
+ "/json": server_json,
+ "/timestamp": server_timestamp,
+ "/headers": server_headers,
+ "/backoff": server_backoff,
+ "/pac2": server_pac,
+ "/quota-notice": server_quota_notice,
+ "/quota-error": server_quota_error,
+ });
+
+ run_next_test();
+});
+
+add_test(function test_members() {
+ _("Resource object members");
+ let uri = server.baseURI + "/open";
+ let res = new Resource(uri);
+ Assert.ok(res.uri instanceof Ci.nsIURI);
+ Assert.equal(res.uri.spec, uri);
+ Assert.equal(res.spec, uri);
+ Assert.equal(typeof res.headers, "object");
+ Assert.equal(typeof res.authenticator, "object");
+
+ run_next_test();
+});
+
+add_task(async function test_get() {
+ _("GET a non-password-protected resource");
+ let res = new Resource(server.baseURI + "/open");
+ let content = await res.get();
+ Assert.equal(content.data, "This path exists");
+ Assert.equal(content.status, 200);
+ Assert.ok(content.success);
+
+ // Observe logging messages.
+ let resLogger = res._log;
+ let dbg = resLogger.debug;
+ let debugMessages = [];
+ resLogger.debug = function (msg, extra) {
+ debugMessages.push(`${msg}: ${JSON.stringify(extra)}`);
+ dbg.call(this, msg);
+ };
+
+ // Since we didn't receive proper JSON data, accessing content.obj
+ // will result in a SyntaxError from JSON.parse
+ let didThrow = false;
+ try {
+ content.obj;
+ } catch (ex) {
+ didThrow = true;
+ }
+ Assert.ok(didThrow);
+ Assert.equal(debugMessages.length, 1);
+ Assert.equal(
+ debugMessages[0],
+ 'Parse fail: Response body starts: "This path exists"'
+ );
+ resLogger.debug = dbg;
+});
+
+add_test(function test_basicauth() {
+ _("Test that the BasicAuthenticator doesn't screw up header case.");
+ let res1 = new Resource(server.baseURI + "/foo");
+ res1.setHeader("Authorization", "Basic foobar");
+ Assert.equal(res1._headers.authorization, "Basic foobar");
+ Assert.equal(res1.headers.authorization, "Basic foobar");
+
+ run_next_test();
+});
+
+add_task(async function test_get_protected_fail() {
+ _(
+ "GET a password protected resource (test that it'll fail w/o pass, no throw)"
+ );
+ let res2 = new Resource(server.baseURI + "/protected");
+ let content = await res2.get();
+ Assert.equal(content.data, "This path exists and is protected - failed");
+ Assert.equal(content.status, 401);
+ Assert.ok(!content.success);
+});
+
+add_task(async function test_get_protected_success() {
+ _("GET a password protected resource");
+ let identityConfig = makeIdentityConfig();
+ let syncAuthManager = new SyncAuthManager();
+ configureFxAccountIdentity(syncAuthManager, identityConfig);
+ let auth = syncAuthManager.getResourceAuthenticator();
+ let res3 = new Resource(server.baseURI + "/protected");
+ res3.authenticator = auth;
+ Assert.equal(res3.authenticator, auth);
+ let content = await res3.get();
+ Assert.equal(content.data, "This path exists and is protected");
+ Assert.equal(content.status, 200);
+ Assert.ok(content.success);
+});
+
+add_task(async function test_get_404() {
+ _("GET a non-existent resource (test that it'll fail, but not throw)");
+ let res4 = new Resource(server.baseURI + "/404");
+ let content = await res4.get();
+ Assert.equal(content.data, "File not found");
+ Assert.equal(content.status, 404);
+ Assert.ok(!content.success);
+
+ // Check some headers of the 404 response
+ Assert.equal(content.headers.connection, "close");
+ Assert.equal(content.headers.server, "httpd.js");
+ Assert.equal(content.headers["content-length"], 14);
+});
+
+add_task(async function test_put_string() {
+ _("PUT to a resource (string)");
+ let res_upload = new Resource(server.baseURI + "/upload");
+ let content = await res_upload.put(JSON.stringify(sample_data));
+ Assert.equal(content.data, "Valid data upload via PUT");
+ Assert.equal(content.status, 200);
+});
+
+add_task(async function test_put_object() {
+ _("PUT to a resource (object)");
+ let res_upload = new Resource(server.baseURI + "/upload");
+ let content = await res_upload.put(sample_data);
+ Assert.equal(content.data, "Valid data upload via PUT");
+ Assert.equal(content.status, 200);
+});
+
+add_task(async function test_post_string() {
+ _("POST to a resource (string)");
+ let res_upload = new Resource(server.baseURI + "/upload");
+ let content = await res_upload.post(JSON.stringify(sample_data));
+ Assert.equal(content.data, "Valid data upload via POST");
+ Assert.equal(content.status, 200);
+});
+
+add_task(async function test_post_object() {
+ _("POST to a resource (object)");
+ let res_upload = new Resource(server.baseURI + "/upload");
+ let content = await res_upload.post(sample_data);
+ Assert.equal(content.data, "Valid data upload via POST");
+ Assert.equal(content.status, 200);
+});
+
+add_task(async function test_delete() {
+ _("DELETE a resource");
+ let res6 = new Resource(server.baseURI + "/delete");
+ let content = await res6.delete();
+ Assert.equal(content.data, "This resource has been deleted");
+ Assert.equal(content.status, 200);
+});
+
+add_task(async function test_json_body() {
+ _("JSON conversion of response body");
+ let res7 = new Resource(server.baseURI + "/json");
+ let content = await res7.get();
+ Assert.equal(content.data, JSON.stringify(sample_data));
+ Assert.equal(content.status, 200);
+ Assert.equal(JSON.stringify(content.obj), JSON.stringify(sample_data));
+});
+
+add_task(async function test_weave_timestamp() {
+ _("X-Weave-Timestamp header updates Resource.serverTime");
+ // Before having received any response containing the
+ // X-Weave-Timestamp header, Resource.serverTime is null.
+ Assert.equal(Resource.serverTime, null);
+ let res8 = new Resource(server.baseURI + "/timestamp");
+ await res8.get();
+ Assert.equal(Resource.serverTime, TIMESTAMP);
+});
+
+add_task(async function test_get_default_headers() {
+ _("GET: Accept defaults to application/json");
+ let res_headers = new Resource(server.baseURI + "/headers");
+ let content = JSON.parse((await res_headers.get()).data);
+ Assert.equal(content.accept, "application/json;q=0.9,*/*;q=0.2");
+});
+
+add_task(async function test_put_default_headers() {
+ _(
+ "PUT: Accept defaults to application/json, Content-Type defaults to text/plain"
+ );
+ let res_headers = new Resource(server.baseURI + "/headers");
+ let content = JSON.parse((await res_headers.put("data")).data);
+ Assert.equal(content.accept, "application/json;q=0.9,*/*;q=0.2");
+ Assert.equal(content["content-type"], "text/plain");
+});
+
+add_task(async function test_post_default_headers() {
+ _(
+ "POST: Accept defaults to application/json, Content-Type defaults to text/plain"
+ );
+ let res_headers = new Resource(server.baseURI + "/headers");
+ let content = JSON.parse((await res_headers.post("data")).data);
+ Assert.equal(content.accept, "application/json;q=0.9,*/*;q=0.2");
+ Assert.equal(content["content-type"], "text/plain");
+});
+
+add_task(async function test_setHeader() {
+ _("setHeader(): setting simple header");
+ let res_headers = new Resource(server.baseURI + "/headers");
+ res_headers.setHeader("X-What-Is-Weave", "awesome");
+ Assert.equal(res_headers.headers["x-what-is-weave"], "awesome");
+ let content = JSON.parse((await res_headers.get()).data);
+ Assert.equal(content["x-what-is-weave"], "awesome");
+});
+
+add_task(async function test_setHeader_overwrite() {
+ _("setHeader(): setting multiple headers, overwriting existing header");
+ let res_headers = new Resource(server.baseURI + "/headers");
+ res_headers.setHeader("X-WHAT-is-Weave", "more awesomer");
+ res_headers.setHeader("X-Another-Header", "hello world");
+ Assert.equal(res_headers.headers["x-what-is-weave"], "more awesomer");
+ Assert.equal(res_headers.headers["x-another-header"], "hello world");
+ let content = JSON.parse((await res_headers.get()).data);
+ Assert.equal(content["x-what-is-weave"], "more awesomer");
+ Assert.equal(content["x-another-header"], "hello world");
+});
+
+add_task(async function test_put_override_content_type() {
+ _("PUT: override default Content-Type");
+ let res_headers = new Resource(server.baseURI + "/headers");
+ res_headers.setHeader("Content-Type", "application/foobar");
+ Assert.equal(res_headers.headers["content-type"], "application/foobar");
+ let content = JSON.parse((await res_headers.put("data")).data);
+ Assert.equal(content["content-type"], "application/foobar");
+});
+
+add_task(async function test_post_override_content_type() {
+ _("POST: override default Content-Type");
+ let res_headers = new Resource(server.baseURI + "/headers");
+ res_headers.setHeader("Content-Type", "application/foobar");
+ let content = JSON.parse((await res_headers.post("data")).data);
+ Assert.equal(content["content-type"], "application/foobar");
+});
+
+add_task(async function test_weave_backoff() {
+ _("X-Weave-Backoff header notifies observer");
+ let backoffInterval;
+ function onBackoff(subject, data) {
+ backoffInterval = subject;
+ }
+ Observers.add("weave:service:backoff:interval", onBackoff);
+
+ let res10 = new Resource(server.baseURI + "/backoff");
+ await res10.get();
+ Assert.equal(backoffInterval, 600);
+});
+
+add_task(async function test_quota_error() {
+  _("X-Weave-Quota-Remaining header only notifies the observer on successful requests.");
+ let res10 = new Resource(server.baseURI + "/quota-error");
+ let content = await res10.get();
+ Assert.equal(content.status, 400);
+ Assert.equal(quotaValue, undefined); // HTTP 400, so no observer notification.
+});
+
+add_task(async function test_quota_notice() {
+ let res10 = new Resource(server.baseURI + "/quota-notice");
+ let content = await res10.get();
+ Assert.equal(content.status, 200);
+ Assert.equal(quotaValue, 1048576);
+});
+
+add_task(async function test_preserve_exceptions() {
+ _("Error handling preserves exception information");
+ let res11 = new Resource("http://localhost:12345/does/not/exist");
+ await Assert.rejects(res11.get(), error => {
+ Assert.notEqual(error, null);
+ Assert.equal(error.result, Cr.NS_ERROR_CONNECTION_REFUSED);
+ Assert.equal(error.name, "NS_ERROR_CONNECTION_REFUSED");
+ return true;
+ });
+});
+
+add_task(async function test_timeout() {
+ _("Ensure channel timeouts are thrown appropriately.");
+ let res19 = new Resource(server.baseURI + "/json");
+ res19.ABORT_TIMEOUT = 0;
+ await Assert.rejects(res19.get(), error => {
+ Assert.equal(error.result, Cr.NS_ERROR_NET_TIMEOUT);
+ return true;
+ });
+});
+
+add_test(function test_uri_construction() {
+ _("Testing URI construction.");
+ let args = [];
+ args.push("newer=" + 1234);
+ args.push("limit=" + 1234);
+ args.push("sort=" + 1234);
+
+ let query = "?" + args.join("&");
+
+ let uri1 = CommonUtils.makeURI("http://foo/" + query).QueryInterface(
+ Ci.nsIURL
+ );
+ let uri2 = CommonUtils.makeURI("http://foo/").QueryInterface(Ci.nsIURL);
+ uri2 = uri2.mutate().setQuery(query).finalize().QueryInterface(Ci.nsIURL);
+ Assert.equal(uri1.query, uri2.query);
+
+ run_next_test();
+});
+
+/**
+ * End of tests that rely on a single HTTP server.
+ * All tests after this point must start and stop their own server.
+ */
+add_test(function eliminate_server() {
+ server.stop(run_next_test);
+});
diff --git a/services/sync/tests/unit/test_resource_header.js b/services/sync/tests/unit/test_resource_header.js
new file mode 100644
index 0000000000..e45b4a9864
--- /dev/null
+++ b/services/sync/tests/unit/test_resource_header.js
@@ -0,0 +1,63 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+const { Resource } = ChromeUtils.importESModule(
+ "resource://services-sync/resource.sys.mjs"
+);
+
+var httpServer = new HttpServer();
+httpServer.registerPathHandler("/content", contentHandler);
+httpServer.start(-1);
+
+const HTTP_PORT = httpServer.identity.primaryPort;
+const TEST_URL = "http://localhost:" + HTTP_PORT + "/content";
+const BODY = "response body";
+
+// Keep headers for later inspection.
+var auth = null;
+var foo = null;
+function contentHandler(metadata, response) {
+ _("Handling request.");
+ auth = metadata.getHeader("Authorization");
+ foo = metadata.getHeader("X-Foo");
+
+ _("Extracted headers. " + auth + ", " + foo);
+
+ response.setHeader("Content-Type", "text/plain");
+ response.bodyOutputStream.write(BODY, BODY.length);
+}
+
+// Set a proxy function to cause an internal redirect.
+function triggerRedirect() {
+ const PROXY_FUNCTION =
+ "function FindProxyForURL(url, host) {" +
+ " return 'PROXY a_non_existent_domain_x7x6c572v:80; " +
+ "PROXY localhost:" +
+ HTTP_PORT +
+ "';" +
+ "}";
+
+ let prefs = Services.prefs.getBranch("network.proxy.");
+ prefs.setIntPref("type", 2);
+ prefs.setStringPref("autoconfig_url", "data:text/plain," + PROXY_FUNCTION);
+}
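+
+// The PAC script above lists an unreachable proxy first, so the proxy failover
+// to localhost replaces the underlying channel internally - which is exactly
+// the kind of redirect this test needs in order to prove that custom headers
+// (Authorization, X-Foo) are copied onto the new channel.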
+
+add_task(async function test_headers_copied() {
+ triggerRedirect();
+
+ _("Issuing request.");
+ let resource = new Resource(TEST_URL);
+ resource.setHeader("Authorization", "Basic foobar");
+ resource.setHeader("X-Foo", "foofoo");
+
+ let result = await resource.get(TEST_URL);
+ _("Result: " + result.data);
+
+ Assert.equal(result.data, BODY);
+ Assert.equal(auth, "Basic foobar");
+ Assert.equal(foo, "foofoo");
+
+ await promiseStopServer(httpServer);
+});
diff --git a/services/sync/tests/unit/test_resource_ua.js b/services/sync/tests/unit/test_resource_ua.js
new file mode 100644
index 0000000000..115fa85b84
--- /dev/null
+++ b/services/sync/tests/unit/test_resource_ua.js
@@ -0,0 +1,96 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Resource } = ChromeUtils.importESModule(
+ "resource://services-sync/resource.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+var httpProtocolHandler = Cc[
+ "@mozilla.org/network/protocol;1?name=http"
+].getService(Ci.nsIHttpProtocolHandler);
+
+// Tracking info/collections.
+var collectionsHelper = track_collections_helper();
+
+var meta_global;
+var server;
+
+var expectedUA;
+var ua;
+function uaHandler(f) {
+ return function (request, response) {
+ ua = request.getHeader("User-Agent");
+ return f(request, response);
+ };
+}
+
+add_task(async function setup() {
+ Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+ meta_global = new ServerWBO("global");
+ server = httpd_setup({
+ "/1.1/johndoe/info/collections": uaHandler(collectionsHelper.handler),
+ "/1.1/johndoe/storage/meta/global": uaHandler(meta_global.handler()),
+ });
+
+ await configureIdentity({ username: "johndoe" }, server);
+ _("Server URL: " + server.baseURI);
+
+  // Note this string is missing the trailing ".desktop" as the test
+ // adjusts the "client.type" pref where that portion comes from.
+ expectedUA =
+ Services.appinfo.name +
+ "/" +
+ Services.appinfo.version +
+ " (" +
+ httpProtocolHandler.oscpu +
+ ")" +
+ " FxSync/" +
+ WEAVE_VERSION +
+ "." +
+ Services.appinfo.appBuildID;
+});
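+
+// Purely for illustration (every component varies by build), the final header
+// asserted below looks roughly like:
+//   Firefox/130.0 (Windows NT 10.0; Win64; x64) FxSync/1.130.0.20240101000000.desktop
+// with the ".desktop" / ".mobile" suffix coming from the client.type pref.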
+
+add_task(async function test_fetchInfo() {
+ _("Testing _fetchInfo.");
+ await Service.login();
+ await Service._fetchInfo();
+ _("User-Agent: " + ua);
+ Assert.equal(ua, expectedUA + ".desktop");
+ ua = "";
+});
+
+add_task(async function test_desktop_post() {
+ _("Testing direct Resource POST.");
+ let r = new Resource(server.baseURI + "/1.1/johndoe/storage/meta/global");
+ await r.post("foo=bar");
+ _("User-Agent: " + ua);
+ Assert.equal(ua, expectedUA + ".desktop");
+ ua = "";
+});
+
+add_task(async function test_desktop_get() {
+  _("Testing direct Resource GET.");
+ Svc.PrefBranch.setStringPref("client.type", "desktop");
+ let r = new Resource(server.baseURI + "/1.1/johndoe/storage/meta/global");
+ await r.get();
+ _("User-Agent: " + ua);
+ Assert.equal(ua, expectedUA + ".desktop");
+ ua = "";
+});
+
+add_task(async function test_mobile_get() {
+ _("Testing mobile.");
+ Svc.PrefBranch.setStringPref("client.type", "mobile");
+ let r = new Resource(server.baseURI + "/1.1/johndoe/storage/meta/global");
+ await r.get();
+ _("User-Agent: " + ua);
+ Assert.equal(ua, expectedUA + ".mobile");
+ ua = "";
+});
+
+add_test(function tear_down() {
+ server.stop(run_next_test);
+});
diff --git a/services/sync/tests/unit/test_score_triggers.js b/services/sync/tests/unit/test_score_triggers.js
new file mode 100644
index 0000000000..c6afa06407
--- /dev/null
+++ b/services/sync/tests/unit/test_score_triggers.js
@@ -0,0 +1,151 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { Status } = ChromeUtils.importESModule(
+ "resource://services-sync/status.sys.mjs"
+);
+
+// Tracking info/collections.
+var collectionsHelper = track_collections_helper();
+var upd = collectionsHelper.with_updated_collection;
+
+function sync_httpd_setup() {
+ let handlers = {};
+
+ handlers["/1.1/johndoe/storage/meta/global"] = new ServerWBO(
+ "global",
+ {}
+ ).handler();
+ handlers["/1.1/johndoe/storage/steam"] = new ServerWBO("steam", {}).handler();
+
+ handlers["/1.1/johndoe/info/collections"] = collectionsHelper.handler;
+ delete collectionsHelper.collections.crypto;
+ delete collectionsHelper.collections.meta;
+
+ let cr = new ServerWBO("keys");
+ handlers["/1.1/johndoe/storage/crypto/keys"] = upd("crypto", cr.handler());
+
+ let cl = new ServerCollection();
+ handlers["/1.1/johndoe/storage/clients"] = upd("clients", cl.handler());
+
+ return httpd_setup(handlers);
+}
+
+async function setUp(server) {
+ let engineInfo = await registerRotaryEngine();
+ await SyncTestingInfrastructure(server, "johndoe", "ilovejane");
+ return engineInfo;
+}
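+
+// The pattern used by the tests below: set the scheduler's syncThreshold, bump
+// a tracker's score past it, and wait for the weave:service:sync:finish
+// notification - the score crossing the threshold is what schedules the sync.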
+
+add_task(async function test_tracker_score_updated() {
+ enableValidationPrefs();
+ let { engine, tracker } = await registerRotaryEngine();
+
+ let scoreUpdated = 0;
+
+ function onScoreUpdated() {
+ scoreUpdated++;
+ }
+
+ Svc.Obs.add("weave:engine:score:updated", onScoreUpdated);
+
+ try {
+ Assert.equal(engine.score, 0);
+
+ tracker.score += SCORE_INCREMENT_SMALL;
+ Assert.equal(engine.score, SCORE_INCREMENT_SMALL);
+
+ Assert.equal(scoreUpdated, 1);
+ } finally {
+ Svc.Obs.remove("weave:engine:score:updated", onScoreUpdated);
+ tracker.resetScore();
+ await tracker.clearChangedIDs();
+ await Service.engineManager.unregister(engine);
+ }
+});
+
+add_task(async function test_sync_triggered() {
+ let server = sync_httpd_setup();
+ let { engine, tracker } = await setUp(server);
+
+ await Service.login();
+
+ Service.scheduler.syncThreshold = MULTI_DEVICE_THRESHOLD;
+
+ Assert.equal(Status.login, LOGIN_SUCCEEDED);
+ tracker.score += SCORE_INCREMENT_XLARGE;
+
+ await promiseOneObserver("weave:service:sync:finish");
+
+ await Service.startOver();
+ await promiseStopServer(server);
+
+ await tracker.clearChangedIDs();
+ await Service.engineManager.unregister(engine);
+});
+
+add_task(async function test_clients_engine_sync_triggered() {
+ enableValidationPrefs();
+
+ _("Ensure that client engine score changes trigger a sync.");
+
+ // The clients engine is not registered like other engines. Therefore,
+ // it needs special treatment throughout the code. Here, we verify the
+ // global score tracker gives it that treatment. See bug 676042 for more.
+
+ let server = sync_httpd_setup();
+ let { engine, tracker } = await setUp(server);
+ await Service.login();
+
+ Service.scheduler.syncThreshold = MULTI_DEVICE_THRESHOLD;
+ Assert.equal(Status.login, LOGIN_SUCCEEDED);
+ Service.clientsEngine._tracker.score += SCORE_INCREMENT_XLARGE;
+
+ await promiseOneObserver("weave:service:sync:finish");
+ _("Sync due to clients engine change completed.");
+
+ await Service.startOver();
+ await promiseStopServer(server);
+
+ await tracker.clearChangedIDs();
+ await Service.engineManager.unregister(engine);
+});
+
+add_task(async function test_incorrect_credentials_sync_not_triggered() {
+ enableValidationPrefs();
+
+ _(
+ "Ensure that score changes don't trigger a sync if Status.login != LOGIN_SUCCEEDED."
+ );
+ let server = sync_httpd_setup();
+ let { engine, tracker } = await setUp(server);
+
+ // Ensure we don't actually try to sync.
+ function onSyncStart() {
+ do_throw("Should not get here!");
+ }
+ Svc.Obs.add("weave:service:sync:start", onSyncStart);
+
+ // Faking incorrect credentials to prevent score update.
+ Status.login = LOGIN_FAILED_LOGIN_REJECTED;
+ tracker.score += SCORE_INCREMENT_XLARGE;
+
+ // First wait >100ms (nsITimers can take up to that much time to fire, so
+ // we can account for the timer in delayedAutoconnect) and then one event
+ // loop tick (to account for a possible call to weave:service:sync:start).
+ await promiseNamedTimer(150, {}, "timer");
+ await Async.promiseYield();
+
+ Svc.Obs.remove("weave:service:sync:start", onSyncStart);
+
+ Assert.equal(Status.login, LOGIN_FAILED_LOGIN_REJECTED);
+
+ await Service.startOver();
+ await promiseStopServer(server);
+
+ await tracker.clearChangedIDs();
+ await Service.engineManager.unregister(engine);
+});
diff --git a/services/sync/tests/unit/test_service_attributes.js b/services/sync/tests/unit/test_service_attributes.js
new file mode 100644
index 0000000000..86ca3c30c0
--- /dev/null
+++ b/services/sync/tests/unit/test_service_attributes.js
@@ -0,0 +1,92 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { FakeGUIDService } = ChromeUtils.importESModule(
+ "resource://testing-common/services/sync/fakeservices.sys.mjs"
+);
+
+add_task(async function test_urls() {
+ _("URL related Service properties correspond to preference settings.");
+ try {
+ Assert.equal(Service.clusterURL, "");
+ Assert.ok(!Service.userBaseURL);
+ Assert.equal(Service.infoURL, undefined);
+ Assert.equal(Service.storageURL, undefined);
+ Assert.equal(Service.metaURL, undefined);
+
+ _("The 'clusterURL' attribute updates preferences and cached URLs.");
+
+ // Since we don't have a cluster URL yet, these will still not be defined.
+ Assert.equal(Service.infoURL, undefined);
+ Assert.ok(!Service.userBaseURL);
+ Assert.equal(Service.storageURL, undefined);
+ Assert.equal(Service.metaURL, undefined);
+
+ Service.clusterURL = "http://weave.cluster/1.1/johndoe/";
+
+ Assert.equal(Service.userBaseURL, "http://weave.cluster/1.1/johndoe/");
+ Assert.equal(
+ Service.infoURL,
+ "http://weave.cluster/1.1/johndoe/info/collections"
+ );
+ Assert.equal(
+ Service.storageURL,
+ "http://weave.cluster/1.1/johndoe/storage/"
+ );
+ Assert.equal(
+ Service.metaURL,
+ "http://weave.cluster/1.1/johndoe/storage/meta/global"
+ );
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ }
+});
+
+add_test(function test_syncID() {
+ _("Service.syncID is auto-generated, corresponds to preference.");
+ new FakeGUIDService();
+
+ try {
+ // Ensure pristine environment
+ Assert.equal(
+ Svc.PrefBranch.getPrefType("client.syncID"),
+ Ci.nsIPrefBranch.PREF_INVALID
+ );
+
+ // Performing the first get on the attribute will generate a new GUID.
+ Assert.equal(Service.syncID, "fake-guid-00");
+ Assert.equal(Svc.PrefBranch.getStringPref("client.syncID"), "fake-guid-00");
+
+ Svc.PrefBranch.setStringPref("client.syncID", Utils.makeGUID());
+ Assert.equal(Svc.PrefBranch.getStringPref("client.syncID"), "fake-guid-01");
+ Assert.equal(Service.syncID, "fake-guid-01");
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ new FakeGUIDService();
+ run_next_test();
+ }
+});
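+
+// As exercised above: the first read of Service.syncID lazily generates a GUID
+// and persists it to the client.syncID pref; subsequent reads simply mirror
+// whatever that pref holds.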
+
+add_test(function test_locked() {
+ _("The 'locked' attribute can be toggled with lock() and unlock()");
+
+ // Defaults to false
+ Assert.equal(Service.locked, false);
+
+ Assert.equal(Service.lock(), true);
+ Assert.equal(Service.locked, true);
+
+ // Locking again will return false
+ Assert.equal(Service.lock(), false);
+
+ Service.unlock();
+ Assert.equal(Service.locked, false);
+ run_next_test();
+});
diff --git a/services/sync/tests/unit/test_service_cluster.js b/services/sync/tests/unit/test_service_cluster.js
new file mode 100644
index 0000000000..b4c14f910d
--- /dev/null
+++ b/services/sync/tests/unit/test_service_cluster.js
@@ -0,0 +1,61 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+add_task(async function test_findCluster() {
+ syncTestLogging();
+ _("Test Service._findCluster()");
+ try {
+ let whenReadyToAuthenticate = Promise.withResolvers();
+ Service.identity.whenReadyToAuthenticate = whenReadyToAuthenticate;
+ whenReadyToAuthenticate.resolve(true);
+
+ Service.identity._ensureValidToken = () =>
+ Promise.reject(new Error("Connection refused"));
+
+ _("_findCluster() throws on network errors (e.g. connection refused).");
+ await Assert.rejects(Service.identity._findCluster(), /Connection refused/);
+
+ Service.identity._ensureValidToken = () =>
+ Promise.resolve({ endpoint: "http://weave.user.node" });
+
+ _("_findCluster() returns the user's cluster node");
+ let cluster = await Service.identity._findCluster();
+ Assert.equal(cluster, "http://weave.user.node/");
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ }
+});
+
+add_task(async function test_setCluster() {
+ syncTestLogging();
+ _("Test Service._setCluster()");
+ try {
+ _("Check initial state.");
+ Assert.equal(Service.clusterURL, "");
+
+ Service.identity._findCluster = () => "http://weave.user.node/";
+
+ _("Set the cluster URL.");
+ Assert.ok(await Service.identity.setCluster());
+ Assert.equal(Service.clusterURL, "http://weave.user.node/");
+
+ _("Setting it again won't make a difference if it's the same one.");
+ Assert.ok(!(await Service.identity.setCluster()));
+ Assert.equal(Service.clusterURL, "http://weave.user.node/");
+
+ _("A 'null' response won't make a difference either.");
+ Service.identity._findCluster = () => null;
+ Assert.ok(!(await Service.identity.setCluster()));
+ Assert.equal(Service.clusterURL, "http://weave.user.node/");
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ }
+});
diff --git a/services/sync/tests/unit/test_service_detect_upgrade.js b/services/sync/tests/unit/test_service_detect_upgrade.js
new file mode 100644
index 0000000000..d0db19af93
--- /dev/null
+++ b/services/sync/tests/unit/test_service_detect_upgrade.js
@@ -0,0 +1,274 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { CryptoWrapper, WBORecord } = ChromeUtils.importESModule(
+ "resource://services-sync/record.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+add_task(async function v4_upgrade() {
+ enableValidationPrefs();
+
+ let clients = new ServerCollection();
+ let meta_global = new ServerWBO("global");
+
+ // Tracking info/collections.
+ let collectionsHelper = track_collections_helper();
+ let upd = collectionsHelper.with_updated_collection;
+ let collections = collectionsHelper.collections;
+
+ let keysWBO = new ServerWBO("keys");
+ let server = httpd_setup({
+ // Special.
+ "/1.1/johndoe/info/collections": collectionsHelper.handler,
+ "/1.1/johndoe/storage/crypto/keys": upd("crypto", keysWBO.handler()),
+ "/1.1/johndoe/storage/meta/global": upd("meta", meta_global.handler()),
+
+ // Track modified times.
+ "/1.1/johndoe/storage/clients": upd("clients", clients.handler()),
+ "/1.1/johndoe/storage/tabs": upd("tabs", new ServerCollection().handler()),
+
+ // Just so we don't get 404s in the logs.
+ "/1.1/johndoe/storage/bookmarks": new ServerCollection().handler(),
+ "/1.1/johndoe/storage/forms": new ServerCollection().handler(),
+ "/1.1/johndoe/storage/history": new ServerCollection().handler(),
+ "/1.1/johndoe/storage/passwords": new ServerCollection().handler(),
+ "/1.1/johndoe/storage/prefs": new ServerCollection().handler(),
+ });
+
+ try {
+ Service.status.resetSync();
+
+ _("Logging in.");
+
+ await configureIdentity({ username: "johndoe" }, server);
+
+ await Service.login();
+ Assert.ok(Service.isLoggedIn);
+ await Service.verifyAndFetchSymmetricKeys();
+ Assert.ok(await Service._remoteSetup());
+
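+    // Bump the server's storageVersion past ours and verify that sync
+    // reports VERSION_OUT_OF_DATE.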
+ async function test_out_of_date() {
+ _("Old meta/global: " + JSON.stringify(meta_global));
+ meta_global.payload = JSON.stringify({
+ syncID: "foooooooooooooooooooooooooo",
+ storageVersion: STORAGE_VERSION + 1,
+ });
+ collections.meta = Date.now() / 1000;
+ _("New meta/global: " + JSON.stringify(meta_global));
+ Service.recordManager.set(Service.metaURL, meta_global);
+ try {
+ await Service.sync();
+ } catch (ex) {}
+ Assert.equal(Service.status.sync, VERSION_OUT_OF_DATE);
+ }
+
+ // See what happens when we bump the storage version.
+ _("Syncing after server has been upgraded.");
+ await test_out_of_date();
+
+ // Same should happen after a wipe.
+ _("Syncing after server has been upgraded and wiped.");
+ await Service.wipeServer();
+ await test_out_of_date();
+
+ // Now's a great time to test what happens when keys get replaced.
+ _("Syncing afresh...");
+ Service.logout();
+ Service.collectionKeys.clear();
+ meta_global.payload = JSON.stringify({
+ syncID: "foooooooooooooobbbbbbbbbbbb",
+ storageVersion: STORAGE_VERSION,
+ });
+ collections.meta = Date.now() / 1000;
+ Service.recordManager.set(Service.metaURL, meta_global);
+ await Service.login();
+ Assert.ok(Service.isLoggedIn);
+ await Service.sync();
+ Assert.ok(Service.isLoggedIn);
+
+ let serverDecrypted;
+ let serverKeys;
+ let serverResp;
+
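+    // Fetch crypto/keys from the server, decrypt it with our sync key
+    // bundle, and return the default key pair it contains.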
+ async function retrieve_server_default() {
+ serverKeys = serverResp = serverDecrypted = null;
+
+ serverKeys = new CryptoWrapper("crypto", "keys");
+ serverResp = (
+ await serverKeys.fetch(Service.resource(Service.cryptoKeysURL))
+ ).response;
+ Assert.ok(serverResp.success);
+
+ serverDecrypted = await serverKeys.decrypt(
+ Service.identity.syncKeyBundle
+ );
+ _("Retrieved WBO: " + JSON.stringify(serverDecrypted));
+ _("serverKeys: " + JSON.stringify(serverKeys));
+
+ return serverDecrypted.default;
+ }
+
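+    // Compare the server's default key pair against the local one; the
+    // caller states whether they are expected to match.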
+ async function retrieve_and_compare_default(should_succeed) {
+ let serverDefault = await retrieve_server_default();
+ let localDefault = Service.collectionKeys.keyForCollection().keyPairB64;
+
+ _("Retrieved keyBundle: " + JSON.stringify(serverDefault));
+ _("Local keyBundle: " + JSON.stringify(localDefault));
+
+ if (should_succeed) {
+ Assert.equal(
+ JSON.stringify(serverDefault),
+ JSON.stringify(localDefault)
+ );
+ } else {
+ Assert.notEqual(
+ JSON.stringify(serverDefault),
+ JSON.stringify(localDefault)
+ );
+ }
+ }
+
+ // Uses the objects set above.
+ async function set_server_keys(pair) {
+ serverDecrypted.default = pair;
+ serverKeys.cleartext = serverDecrypted;
+ await serverKeys.encrypt(Service.identity.syncKeyBundle);
+ await serverKeys.upload(Service.resource(Service.cryptoKeysURL));
+ }
+
+ _("Checking we have the latest keys.");
+ await retrieve_and_compare_default(true);
+
+ _("Update keys on server.");
+ await set_server_keys([
+ "KaaaaaaaaaaaHAtfmuRY0XEJ7LXfFuqvF7opFdBD/MY=",
+ "aaaaaaaaaaaapxMO6TEWtLIOv9dj6kBAJdzhWDkkkis=",
+ ]);
+
+ _("Checking that we no longer have the latest keys.");
+ await retrieve_and_compare_default(false);
+
+ _("Indeed, they're what we set them to...");
+ Assert.equal(
+ "KaaaaaaaaaaaHAtfmuRY0XEJ7LXfFuqvF7opFdBD/MY=",
+ (await retrieve_server_default())[0]
+ );
+
+ _("Sync. Should download changed keys automatically.");
+ let oldClientsModified = collections.clients;
+ let oldTabsModified = collections.tabs;
+
+ await Service.login();
+ await Service.sync();
+ _("New key should have forced upload of data.");
+ _("Tabs: " + oldTabsModified + " < " + collections.tabs);
+ _("Clients: " + oldClientsModified + " < " + collections.clients);
+ Assert.ok(collections.clients > oldClientsModified);
+ Assert.ok(collections.tabs > oldTabsModified);
+
+ _("... and keys will now match.");
+ await retrieve_and_compare_default(true);
+
+ // Clean up.
+ await Service.startOver();
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function v5_upgrade() {
+ enableValidationPrefs();
+
+ // Tracking info/collections.
+ let collectionsHelper = track_collections_helper();
+ let upd = collectionsHelper.with_updated_collection;
+
+ let keysWBO = new ServerWBO("keys");
+ let bulkWBO = new ServerWBO("bulk");
+ let clients = new ServerCollection();
+ let meta_global = new ServerWBO("global");
+
+ let server = httpd_setup({
+ // Special.
+ "/1.1/johndoe/storage/meta/global": upd("meta", meta_global.handler()),
+ "/1.1/johndoe/info/collections": collectionsHelper.handler,
+ "/1.1/johndoe/storage/crypto/keys": upd("crypto", keysWBO.handler()),
+ "/1.1/johndoe/storage/crypto/bulk": upd("crypto", bulkWBO.handler()),
+
+ // Track modified times.
+ "/1.1/johndoe/storage/clients": upd("clients", clients.handler()),
+ "/1.1/johndoe/storage/tabs": upd("tabs", new ServerCollection().handler()),
+ });
+
+ try {
+ Service.status.resetSync();
+
+ Service.clusterURL = server.baseURI + "/";
+
+ await configureIdentity({ username: "johndoe" }, server);
+
+    // Test an upgrade where the contents of the server would cause an error
+    // -- keys encrypted with a different sync key, for example.
+ _("Testing v4 -> v5 (or similar) upgrade.");
+ async function update_server_keys(syncKeyBundle, wboName, collWBO) {
+ await generateNewKeys(Service.collectionKeys);
+ let serverKeys = Service.collectionKeys.asWBO("crypto", wboName);
+ await serverKeys.encrypt(syncKeyBundle);
+ let res = Service.resource(Service.storageURL + collWBO);
+ Assert.ok((await serverKeys.upload(res)).success);
+ }
+
+ _("Bumping version.");
+ // Bump version on the server.
+ let m = new WBORecord("meta", "global");
+ m.payload = {
+ syncID: "foooooooooooooooooooooooooo",
+ storageVersion: STORAGE_VERSION + 1,
+ };
+ await m.upload(Service.resource(Service.metaURL));
+
+ _("New meta/global: " + JSON.stringify(meta_global));
+
+ // Fill the keys with bad data.
+ let badKeys = new BulkKeyBundle("crypto");
+ await badKeys.generateRandom();
+ await update_server_keys(badKeys, "keys", "crypto/keys"); // v4
+ await update_server_keys(badKeys, "bulk", "crypto/bulk"); // v5
+
+ _("Generating new keys.");
+ await generateNewKeys(Service.collectionKeys);
+
+    // Now sync and see what happens. It should be a version failure, not a
+    // crypto failure.
+
+ _("Logging in.");
+ try {
+ await Service.login();
+ } catch (e) {
+ _("Exception: " + e);
+ }
+ _("Status: " + Service.status);
+ Assert.ok(!Service.isLoggedIn);
+ Assert.equal(VERSION_OUT_OF_DATE, Service.status.sync);
+
+ // Clean up.
+ await Service.startOver();
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ await promiseStopServer(server);
+ }
+});
+
+function run_test() {
+ Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+
+ run_next_test();
+}
diff --git a/services/sync/tests/unit/test_service_login.js b/services/sync/tests/unit/test_service_login.js
new file mode 100644
index 0000000000..c75799d38e
--- /dev/null
+++ b/services/sync/tests/unit/test_service_login.js
@@ -0,0 +1,224 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+
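+// Wrap a handler so that it only responds to requests carrying a Hawk
+// authorization header; anything else receives a 401.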
+function login_handling(handler) {
+ return function (request, response) {
+ if (has_hawk_header(request)) {
+ handler(request, response);
+ } else {
+ let body = "Unauthorized";
+ response.setStatusLine(request.httpVersion, 401, "Unauthorized");
+ response.setHeader("Content-Type", "text/plain");
+ response.bodyOutputStream.write(body, body.length);
+ }
+ };
+}
+
+add_task(async function test_offline() {
+ try {
+ _("The right bits are set when we're offline.");
+ Services.io.offline = true;
+ Assert.ok(!(await Service.login()));
+ Assert.equal(Service.status.login, LOGIN_FAILED_NETWORK_ERROR);
+ Services.io.offline = false;
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ }
+});
+
+function setup() {
+ let janeHelper = track_collections_helper();
+ let janeU = janeHelper.with_updated_collection;
+ let johnHelper = track_collections_helper();
+ let johnU = johnHelper.with_updated_collection;
+
+ let server = httpd_setup({
+ "/1.1/johndoe/info/collections": login_handling(johnHelper.handler),
+ "/1.1/janedoe/info/collections": login_handling(janeHelper.handler),
+
+ // We need these handlers because we test login, and login
+ // is where keys are generated or fetched.
+ // TODO: have Jane fetch her keys, not generate them...
+ "/1.1/johndoe/storage/crypto/keys": johnU(
+ "crypto",
+ new ServerWBO("keys").handler()
+ ),
+ "/1.1/johndoe/storage/meta/global": johnU(
+ "meta",
+ new ServerWBO("global").handler()
+ ),
+ "/1.1/janedoe/storage/crypto/keys": janeU(
+ "crypto",
+ new ServerWBO("keys").handler()
+ ),
+ "/1.1/janedoe/storage/meta/global": janeU(
+ "meta",
+ new ServerWBO("global").handler()
+ ),
+ });
+
+ return server;
+}
+
+add_task(async function test_not_logged_in() {
+ let server = setup();
+ try {
+ await Service.login();
+ Assert.ok(!Service.isLoggedIn, "no user configured, so can't be logged in");
+ Assert.equal(Service._checkSync(), kSyncNotConfigured);
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_login_logout() {
+ enableValidationPrefs();
+
+ let server = setup();
+
+ try {
+ _("Force the initial state.");
+ Service.status.service = STATUS_OK;
+ Assert.equal(Service.status.service, STATUS_OK);
+
+ _("Try logging in. It won't work because we're not configured yet.");
+ await Service.login();
+ Assert.equal(Service.status.service, CLIENT_NOT_CONFIGURED);
+ Assert.equal(Service.status.login, LOGIN_FAILED_NO_USERNAME);
+ Assert.ok(!Service.isLoggedIn);
+
+    _("Try again with a configured account.");
+ await configureIdentity({ username: "johndoe" }, server);
+ await Service.login();
+ Assert.equal(Service.status.service, STATUS_OK);
+ Assert.equal(Service.status.login, LOGIN_SUCCEEDED);
+ Assert.ok(Service.isLoggedIn);
+
+ _("Logout.");
+ Service.logout();
+ Assert.ok(!Service.isLoggedIn);
+
+ _("Logging out again won't do any harm.");
+ Service.logout();
+ Assert.ok(!Service.isLoggedIn);
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_login_on_sync() {
+ enableValidationPrefs();
+
+ let server = setup();
+ await configureIdentity({ username: "johndoe" }, server);
+
+ try {
+ _("Sync calls login.");
+ let oldLogin = Service.login;
+ let loginCalled = false;
+ Service.login = async function () {
+ loginCalled = true;
+ Service.status.login = LOGIN_SUCCEEDED;
+ this._loggedIn = false; // So that sync aborts.
+ return true;
+ };
+
+ await Service.sync();
+
+ Assert.ok(loginCalled);
+ Service.login = oldLogin;
+
+ // Stub mpLocked.
+ let mpLocked = true;
+ Utils.mpLocked = () => mpLocked;
+
+ // Stub scheduleNextSync. This gets called within checkSyncStatus if we're
+ // ready to sync, so use it as an indicator.
+ let scheduleNextSyncF = Service.scheduler.scheduleNextSync;
+ let scheduleCalled = false;
+ Service.scheduler.scheduleNextSync = function (wait) {
+ scheduleCalled = true;
+ scheduleNextSyncF.call(this, wait);
+ };
+
+    // Autoconnect still tries to connect in the background (useful behavior:
+    // for users without a master password (MP) and those whose MP is
+    // unlocked, this will detect version expiry earlier).
+ //
+ // Consequently, non-MP users will be logged in as in the pre-Bug 543784 world,
+ // and checkSyncStatus reflects that by waiting for login.
+ //
+ // This process doesn't apply if your MP is still locked, so we make
+ // checkSyncStatus accept a locked MP in place of being logged in.
+ //
+ // This test exercises these two branches.
+
+ _("We're ready to sync if locked.");
+ Service.enabled = true;
+ Services.io.offline = false;
+ Service.scheduler.checkSyncStatus();
+ Assert.ok(scheduleCalled);
+
+ _("... and also if we're not locked.");
+ scheduleCalled = false;
+ mpLocked = false;
+ Service.scheduler.checkSyncStatus();
+ Assert.ok(scheduleCalled);
+ Service.scheduler.scheduleNextSync = scheduleNextSyncF;
+
+ // TODO: need better tests around master password prompting. See Bug 620583.
+
+ mpLocked = true;
+
+ // Testing exception handling if master password dialog is canceled.
+ // Do this by monkeypatching.
+ Service.identity.unlockAndVerifyAuthState = () =>
+ Promise.resolve(MASTER_PASSWORD_LOCKED);
+
+ let cSTCalled = false;
+ let lockedSyncCalled = false;
+
+ Service.scheduler.clearSyncTriggers = function () {
+ cSTCalled = true;
+ };
+ Service._lockedSync = async function () {
+ lockedSyncCalled = true;
+ };
+
+    _("If the master password prompt is canceled, login fails and we report the locked state.");
+ Assert.ok(!(await Service.login()));
+ Assert.equal(Service.status.login, MASTER_PASSWORD_LOCKED);
+ Assert.equal(Service.status.service, LOGIN_FAILED);
+ _("Locked? " + Utils.mpLocked());
+ _("checkSync reports the correct term.");
+ Assert.equal(Service._checkSync(), kSyncMasterPasswordLocked);
+
+ _("Sync doesn't proceed and clears triggers if MP is still locked.");
+ await Service.sync();
+
+ Assert.ok(cSTCalled);
+ Assert.ok(!lockedSyncCalled);
+
+    // N.B., a bunch of methods are stubbed at this point. Be careful when
+    // adding new tests after this point!
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ await promiseStopServer(server);
+ }
+});
diff --git a/services/sync/tests/unit/test_service_startOver.js b/services/sync/tests/unit/test_service_startOver.js
new file mode 100644
index 0000000000..22d92c76ef
--- /dev/null
+++ b/services/sync/tests/unit/test_service_startOver.js
@@ -0,0 +1,91 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
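+// A minimal engine whose removeClientData() merely records that it was
+// called, letting us observe whether startOver() wipes client data.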
+function BlaEngine() {
+ SyncEngine.call(this, "Bla", Service);
+}
+BlaEngine.prototype = {
+ removed: false,
+ async removeClientData() {
+ this.removed = true;
+ },
+};
+Object.setPrototypeOf(BlaEngine.prototype, SyncEngine.prototype);
+
+add_task(async function setup() {
+ await Service.engineManager.register(BlaEngine);
+});
+
+add_task(async function test_resetLocalData() {
+ await configureIdentity();
+ Service.status.enforceBackoff = true;
+ Service.status.backoffInterval = 42;
+ Service.status.minimumNextSync = 23;
+
+ // Verify set up.
+ Assert.equal(Service.status.checkSetup(), STATUS_OK);
+
+ // Verify state that the observer sees.
+ let observerCalled = false;
+ Svc.Obs.add("weave:service:start-over", function onStartOver() {
+ Svc.Obs.remove("weave:service:start-over", onStartOver);
+ observerCalled = true;
+
+ Assert.equal(Service.status.service, CLIENT_NOT_CONFIGURED);
+ });
+
+ await Service.startOver();
+ Assert.ok(observerCalled);
+
+ // Verify the site was nuked from orbit.
+ Assert.equal(
+ Svc.PrefBranch.getPrefType("username"),
+ Ci.nsIPrefBranch.PREF_INVALID
+ );
+
+ Assert.equal(Service.status.service, CLIENT_NOT_CONFIGURED);
+ Assert.ok(!Service.status.enforceBackoff);
+ Assert.equal(Service.status.backoffInterval, 0);
+ Assert.equal(Service.status.minimumNextSync, 0);
+});
+
+add_task(async function test_removeClientData() {
+ let engine = Service.engineManager.get("bla");
+
+ // No cluster URL = no removal.
+ Assert.ok(!engine.removed);
+ await Service.startOver();
+ Assert.ok(!engine.removed);
+
+ Service.clusterURL = "https://localhost/";
+
+ Assert.ok(!engine.removed);
+ await Service.startOver();
+ Assert.ok(engine.removed);
+});
+
+add_task(async function test_reset_SyncScheduler() {
+ // Some non-default values for SyncScheduler's attributes.
+ Service.scheduler.idle = true;
+ Service.scheduler.hasIncomingItems = true;
+ Svc.PrefBranch.setIntPref("clients.devices.desktop", 42);
+ Service.scheduler.nextSync = Date.now();
+ Service.scheduler.syncThreshold = MULTI_DEVICE_THRESHOLD;
+ Service.scheduler.syncInterval = Service.scheduler.activeInterval;
+
+ await Service.startOver();
+
+ Assert.ok(!Service.scheduler.idle);
+ Assert.ok(!Service.scheduler.hasIncomingItems);
+ Assert.equal(Service.scheduler.numClients, 0);
+ Assert.equal(Service.scheduler.nextSync, 0);
+ Assert.equal(Service.scheduler.syncThreshold, SINGLE_USER_THRESHOLD);
+ Assert.equal(
+ Service.scheduler.syncInterval,
+ Service.scheduler.singleDeviceInterval
+ );
+});
diff --git a/services/sync/tests/unit/test_service_startup.js b/services/sync/tests/unit/test_service_startup.js
new file mode 100644
index 0000000000..66623a951a
--- /dev/null
+++ b/services/sync/tests/unit/test_service_startup.js
@@ -0,0 +1,60 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { AppConstants } = ChromeUtils.importESModule(
+ "resource://gre/modules/AppConstants.sys.mjs"
+);
+
+// Svc.PrefBranch.setStringPref("services.sync.log.appender.dump", "All");
+Svc.PrefBranch.setStringPref("registerEngines", "Tab,Bookmarks,Form,History");
+
+add_task(async function run_test() {
+ validate_all_future_pings();
+ _("When imported, Service.onStartup is called");
+
+ let xps = Cc["@mozilla.org/weave/service;1"].getService(
+ Ci.nsISupports
+ ).wrappedJSObject;
+ Assert.ok(!xps.enabled);
+
+ // Test fixtures
+ let { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+ );
+ Services.prefs.setStringPref("services.sync.username", "johndoe");
+ Assert.ok(xps.enabled);
+
+ _("Service is enabled.");
+ Assert.equal(Service.enabled, true);
+
+ _("Observers are notified of startup");
+ Assert.ok(!Service.status.ready);
+ Assert.ok(!xps.ready);
+
+ await promiseOneObserver("weave:service:ready");
+
+ Assert.ok(Service.status.ready);
+ Assert.ok(xps.ready);
+
+ _("Engines are registered.");
+ let engines = Service.engineManager.getAll();
+ if (AppConstants.MOZ_APP_NAME == "thunderbird") {
+ // Thunderbird's engines are registered later, so they're not here yet.
+ Assert.deepEqual(
+ engines.map(engine => engine.name),
+ []
+ );
+ } else {
+ Assert.deepEqual(
+ engines.map(engine => engine.name),
+ ["tabs", "bookmarks", "forms", "history"]
+ );
+ }
+
+ // Clean up.
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+
+ do_test_finished();
+});
diff --git a/services/sync/tests/unit/test_service_sync_401.js b/services/sync/tests/unit/test_service_sync_401.js
new file mode 100644
index 0000000000..a0bde0b0ab
--- /dev/null
+++ b/services/sync/tests/unit/test_service_sync_401.js
@@ -0,0 +1,90 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
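+// Only accept requests whose Hawk id matches the one configured by the test;
+// anything else receives a 401, simulating a password change elsewhere.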
+function login_handling(handler) {
+ return function (request, response) {
+ if (
+ request.hasHeader("Authorization") &&
+ request.getHeader("Authorization").includes('Hawk id="id"')
+ ) {
+ handler(request, response);
+ } else {
+ let body = "Unauthorized";
+ response.setStatusLine(request.httpVersion, 401, "Unauthorized");
+ response.bodyOutputStream.write(body, body.length);
+ }
+ };
+}
+
+add_task(async function run_test() {
+ Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+
+ let collectionsHelper = track_collections_helper();
+ let upd = collectionsHelper.with_updated_collection;
+
+ let server = httpd_setup({
+ "/1.1/johndoe/storage/crypto/keys": upd(
+ "crypto",
+ new ServerWBO("keys").handler()
+ ),
+ "/1.1/johndoe/storage/meta/global": upd(
+ "meta",
+ new ServerWBO("global").handler()
+ ),
+ "/1.1/johndoe/info/collections": login_handling(collectionsHelper.handler),
+ });
+
+ const GLOBAL_SCORE = 42;
+
+ try {
+ _("Set up test fixtures.");
+ await SyncTestingInfrastructure(server, "johndoe", "ilovejane");
+ Service.scheduler.globalScore = GLOBAL_SCORE;
+ // Avoid daily ping
+ Svc.PrefBranch.setIntPref("lastPing", Math.floor(Date.now() / 1000));
+
+ let threw = false;
+ Svc.Obs.add("weave:service:sync:error", function (subject, data) {
+ threw = true;
+ });
+
+ _("Initial state: We're successfully logged in.");
+ await Service.login();
+ Assert.ok(Service.isLoggedIn);
+ Assert.equal(Service.status.login, LOGIN_SUCCEEDED);
+
+ _("Simulate having changed the password somewhere else.");
+ Service.identity._token.id = "somethingelse";
+ Service.identity.unlockAndVerifyAuthState = () =>
+ Promise.resolve(LOGIN_FAILED_LOGIN_REJECTED);
+
+ _("Let's try to sync.");
+ await Service.sync();
+
+ _("Verify that sync() threw an exception.");
+ Assert.ok(threw);
+
+ _("We're no longer logged in.");
+ Assert.ok(!Service.isLoggedIn);
+
+ _("Sync status won't have changed yet, because we haven't tried again.");
+
+ _("globalScore is reset upon starting a sync.");
+ Assert.equal(Service.scheduler.globalScore, 0);
+
+ _("Our next sync will fail appropriately.");
+ try {
+ await Service.sync();
+ } catch (ex) {}
+ Assert.equal(Service.status.login, LOGIN_FAILED_LOGIN_REJECTED);
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ await promiseStopServer(server);
+ }
+});
diff --git a/services/sync/tests/unit/test_service_sync_locked.js b/services/sync/tests/unit/test_service_sync_locked.js
new file mode 100644
index 0000000000..5a872e2708
--- /dev/null
+++ b/services/sync/tests/unit/test_service_sync_locked.js
@@ -0,0 +1,47 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+add_task(async function run_test() {
+ validate_all_future_pings();
+ let debug = [];
+ let info = [];
+
+ function augmentLogger(old) {
+ let d = old.debug;
+ let i = old.info;
+ // For the purposes of this test we don't need to do full formatting
+ // of the 2nd param, as the ones we care about are always strings.
+ old.debug = function (m, p) {
+ debug.push(p ? m + ": " + (p.message || p) : m);
+ d.call(old, m, p);
+ };
+ old.info = function (m, p) {
+ info.push(p ? m + ": " + (p.message || p) : m);
+ i.call(old, m, p);
+ };
+ return old;
+ }
+
+ Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+
+ augmentLogger(Service._log);
+
+ // Avoid daily ping
+ Svc.PrefBranch.setIntPref("lastPing", Math.floor(Date.now() / 1000));
+
+  _("Check that sync logs appropriately if a sync is already in progress.");
+ Service._locked = true;
+ await Service.sync();
+ Service._locked = false;
+
+ Assert.ok(
+ debug[debug.length - 2].startsWith(
+ 'Exception calling WrappedLock: Could not acquire lock. Label: "service.js: login".'
+ )
+ );
+ Assert.equal(info[info.length - 1], "Cannot start sync: already syncing?");
+});
diff --git a/services/sync/tests/unit/test_service_sync_remoteSetup.js b/services/sync/tests/unit/test_service_sync_remoteSetup.js
new file mode 100644
index 0000000000..ec95e69c78
--- /dev/null
+++ b/services/sync/tests/unit/test_service_sync_remoteSetup.js
@@ -0,0 +1,241 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+// Unfortunately, this test fails if this engine is enabled, for reasons
+// unrelated to the engine itself. In short:
+// * Because the addon manager isn't initialized, the addons engine fails to
+//   initialize. So we end up writing a meta/global with `extension-storage`
+//   but not addons.
+// * After we sync, we discover 'addons' is locally enabled, but because it's
+//   not in meta/global, we decide it's been remotely declined (without even
+//   considering `declined`). So we disable 'addons'.
+// * Disabling 'addons' means 'extension-storage' is disabled - but because
+//   that *is* in meta/global, we re-update meta/global to remove it.
+// * This test then fails due to the extra, unexpected update of meta/global.
+//
+// Another option would be to ensure the addons manager is initialized, but
+// that's a larger patch and still isn't strictly relevant to what's being
+// tested here, so we simply force-disable the engine for this test.
+Services.prefs.setBoolPref(
+ "services.sync.engine.extension-storage.force",
+ false
+);
+
+add_task(async function run_test() {
+ enableValidationPrefs();
+
+ validate_all_future_pings();
+ Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+
+ let clients = new ServerCollection();
+ let meta_global = new ServerWBO("global");
+
+ let collectionsHelper = track_collections_helper();
+ let upd = collectionsHelper.with_updated_collection;
+ let collections = collectionsHelper.collections;
+
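+  // Wrap a WBO's handler so we can tell whether the resource was actually
+  // requested during a sync.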
+ function wasCalledHandler(wbo) {
+ let handler = wbo.handler();
+ return function () {
+ wbo.wasCalled = true;
+ handler.apply(this, arguments);
+ };
+ }
+
+ let keysWBO = new ServerWBO("keys");
+ let cryptoColl = new ServerCollection({ keys: keysWBO });
+ let metaColl = new ServerCollection({ global: meta_global });
+ do_test_pending();
+
+ /**
+ * Handle the bulk DELETE request sent by wipeServer.
+ */
+ function storageHandler(request, response) {
+ Assert.equal("DELETE", request.method);
+ Assert.ok(request.hasHeader("X-Confirm-Delete"));
+
+ _("Wiping out all collections.");
+ cryptoColl.delete({});
+ clients.delete({});
+ metaColl.delete({});
+
+ let ts = new_timestamp();
+ collectionsHelper.update_collection("crypto", ts);
+ collectionsHelper.update_collection("clients", ts);
+ collectionsHelper.update_collection("meta", ts);
+ return_timestamp(request, response, ts);
+ }
+
+ const GLOBAL_PATH = "/1.1/johndoe/storage/meta/global";
+
+ let handlers = {
+ "/1.1/johndoe/storage": storageHandler,
+ "/1.1/johndoe/storage/crypto/keys": upd("crypto", keysWBO.handler()),
+ "/1.1/johndoe/storage/crypto": upd("crypto", cryptoColl.handler()),
+ "/1.1/johndoe/storage/clients": upd("clients", clients.handler()),
+ "/1.1/johndoe/storage/meta": upd("meta", wasCalledHandler(metaColl)),
+ "/1.1/johndoe/storage/meta/global": upd(
+ "meta",
+ wasCalledHandler(meta_global)
+ ),
+ "/1.1/johndoe/info/collections": collectionsHelper.handler,
+ };
+
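+  // Temporarily replace the handler for a path with a mocked one; restore()
+  // reinstates the original from the handlers table.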
+ function mockHandler(path, mock) {
+ server.registerPathHandler(path, mock(handlers[path]));
+ return {
+ restore() {
+ server.registerPathHandler(path, handlers[path]);
+ },
+ };
+ }
+
+ let server = httpd_setup(handlers);
+
+ try {
+ _("Checking Status.sync with no credentials.");
+ await Service.verifyAndFetchSymmetricKeys();
+ Assert.equal(Service.status.sync, CREDENTIALS_CHANGED);
+ Assert.equal(Service.status.login, LOGIN_FAILED_NO_PASSPHRASE);
+
+ await configureIdentity({ username: "johndoe" }, server);
+
+ await Service.login();
+ _("Checking that remoteSetup returns true when credentials have changed.");
+ (await Service.recordManager.get(Service.metaURL)).payload.syncID =
+ "foobar";
+ Assert.ok(await Service._remoteSetup());
+
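+    // Decorate a handler so that the given HTTP method returns a fixed
+    // status code while all other methods fall through to the original.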
+ let returnStatusCode = (method, code) => oldMethod => (req, res) => {
+ if (req.method === method) {
+ res.setStatusLine(req.httpVersion, code, "");
+ } else {
+ oldMethod(req, res);
+ }
+ };
+
+ let mock = mockHandler(GLOBAL_PATH, returnStatusCode("GET", 401));
+ Service.recordManager.del(Service.metaURL);
+ _(
+ "Checking that remoteSetup returns false on 401 on first get /meta/global."
+ );
+ Assert.equal(false, await Service._remoteSetup());
+ mock.restore();
+
+ await Service.login();
+ mock = mockHandler(GLOBAL_PATH, returnStatusCode("GET", 503));
+ Service.recordManager.del(Service.metaURL);
+ _(
+ "Checking that remoteSetup returns false on 503 on first get /meta/global."
+ );
+ Assert.equal(false, await Service._remoteSetup());
+ Assert.equal(Service.status.sync, METARECORD_DOWNLOAD_FAIL);
+ mock.restore();
+
+ await Service.login();
+ mock = mockHandler(GLOBAL_PATH, returnStatusCode("GET", 404));
+ Service.recordManager.del(Service.metaURL);
+ _("Checking that remoteSetup recovers on 404 on first get /meta/global.");
+ Assert.ok(await Service._remoteSetup());
+ mock.restore();
+
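+    // Fake an info/collections response whose meta timestamp (1) is newer
+    // than our cached metaModified (0), so the cached meta/global record is
+    // considered stale and re-fetched.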
+ let makeOutdatedMeta = async () => {
+ Service.metaModified = 0;
+ let infoResponse = await Service._fetchInfo();
+ return {
+ status: infoResponse.status,
+ obj: {
+ crypto: infoResponse.obj.crypto,
+ clients: infoResponse.obj.clients,
+ meta: 1,
+ },
+ };
+ };
+
+ _(
+ "Checking that remoteSetup recovers on 404 on get /meta/global after clear cached one."
+ );
+ mock = mockHandler(GLOBAL_PATH, returnStatusCode("GET", 404));
+ Service.recordManager.set(Service.metaURL, { isNew: false });
+ Assert.ok(await Service._remoteSetup(await makeOutdatedMeta()));
+ mock.restore();
+
+ _(
+ "Checking that remoteSetup returns false on 503 on get /meta/global after clear cached one."
+ );
+ mock = mockHandler(GLOBAL_PATH, returnStatusCode("GET", 503));
+ Service.status.sync = "";
+ Service.recordManager.set(Service.metaURL, { isNew: false });
+ Assert.equal(false, await Service._remoteSetup(await makeOutdatedMeta()));
+ Assert.equal(Service.status.sync, "");
+ mock.restore();
+
+ metaColl.delete({});
+
+ _("Do an initial sync.");
+ await Service.sync();
+
+ _("Checking that remoteSetup returns true.");
+ Assert.ok(await Service._remoteSetup());
+
+ _("Verify that the meta record was uploaded.");
+ Assert.equal(meta_global.data.syncID, Service.syncID);
+ Assert.equal(meta_global.data.storageVersion, STORAGE_VERSION);
+ Assert.equal(
+ meta_global.data.engines.clients.version,
+ Service.clientsEngine.version
+ );
+ Assert.equal(
+ meta_global.data.engines.clients.syncID,
+ await Service.clientsEngine.getSyncID()
+ );
+
+ _(
+ "Set the collection info hash so that sync() will remember the modified times for future runs."
+ );
+ let lastSync = await Service.clientsEngine.getLastSync();
+ collections.meta = lastSync;
+ collections.clients = lastSync;
+ await Service.sync();
+
+    _("Sync again and verify that meta/global wasn't downloaded again.");
+ meta_global.wasCalled = false;
+ await Service.sync();
+ Assert.ok(!meta_global.wasCalled);
+
+    _(
+      "Fake modified records. This will cause a redownload, but not a reupload, since the record hasn't changed."
+    );
+ collections.meta += 42;
+ meta_global.wasCalled = false;
+
+ let metaModified = meta_global.modified;
+
+ await Service.sync();
+ Assert.ok(meta_global.wasCalled);
+ Assert.equal(metaModified, meta_global.modified);
+
+ // Try to screw up HMAC calculation.
+ // Re-encrypt keys with a new random keybundle, and upload them to the
+ // server, just as might happen with a second client.
+ _("Attempting to screw up HMAC by re-encrypting keys.");
+ let keys = Service.collectionKeys.asWBO();
+ let b = new BulkKeyBundle("hmacerror");
+ await b.generateRandom();
+ collections.crypto = keys.modified = 100 + Date.now() / 1000; // Future modification time.
+ await keys.encrypt(b);
+ await keys.upload(Service.resource(Service.cryptoKeysURL));
+
+ Assert.equal(false, await Service.verifyAndFetchSymmetricKeys());
+ Assert.equal(Service.status.login, LOGIN_FAILED_INVALID_PASSPHRASE);
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ server.stop(do_test_finished);
+ }
+});
diff --git a/services/sync/tests/unit/test_service_sync_specified.js b/services/sync/tests/unit/test_service_sync_specified.js
new file mode 100644
index 0000000000..845cdb3669
--- /dev/null
+++ b/services/sync/tests/unit/test_service_sync_specified.js
@@ -0,0 +1,150 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+let syncedEngines = [];
+
+function SteamEngine() {
+ SyncEngine.call(this, "Steam", Service);
+}
+SteamEngine.prototype = {
+ async _sync() {
+ syncedEngines.push(this.name);
+ },
+};
+Object.setPrototypeOf(SteamEngine.prototype, SyncEngine.prototype);
+
+function StirlingEngine() {
+ SyncEngine.call(this, "Stirling", Service);
+}
+StirlingEngine.prototype = {
+ async _sync() {
+ syncedEngines.push(this.name);
+ },
+};
+Object.setPrototypeOf(StirlingEngine.prototype, SteamEngine.prototype);
+
+// Tracking info/collections.
+var collectionsHelper = track_collections_helper();
+var upd = collectionsHelper.with_updated_collection;
+
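+// Register the handlers every sync needs (info/collections, crypto/keys,
+// clients) on top of the test-specific ones passed in.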
+function sync_httpd_setup(handlers) {
+ handlers["/1.1/johndoe/info/collections"] = collectionsHelper.handler;
+ delete collectionsHelper.collections.crypto;
+ delete collectionsHelper.collections.meta;
+
+ let cr = new ServerWBO("keys");
+ handlers["/1.1/johndoe/storage/crypto/keys"] = upd("crypto", cr.handler());
+
+ let cl = new ServerCollection();
+ handlers["/1.1/johndoe/storage/clients"] = upd("clients", cl.handler());
+
+ return httpd_setup(handlers);
+}
+
+async function setUp() {
+ syncedEngines = [];
+ let engine = Service.engineManager.get("steam");
+ engine.enabled = true;
+ engine.syncPriority = 1;
+
+ engine = Service.engineManager.get("stirling");
+ engine.enabled = true;
+ engine.syncPriority = 2;
+
+ let server = sync_httpd_setup({
+ "/1.1/johndoe/storage/meta/global": new ServerWBO("global", {}).handler(),
+ });
+ await SyncTestingInfrastructure(server, "johndoe", "ilovejane");
+ return server;
+}
+
+add_task(async function setup() {
+ await Service.engineManager.clear();
+ validate_all_future_pings();
+
+ await Service.engineManager.register(SteamEngine);
+ await Service.engineManager.register(StirlingEngine);
+});
+
+add_task(async function test_noEngines() {
+ enableValidationPrefs();
+
+ _("Test: An empty array of engines to sync does nothing.");
+ let server = await setUp();
+
+ try {
+ _("Sync with no engines specified.");
+ await Service.sync({ engines: [] });
+ deepEqual(syncedEngines, [], "no engines were synced");
+ } finally {
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_oneEngine() {
+ enableValidationPrefs();
+
+ _("Test: Only one engine is synced.");
+ let server = await setUp();
+
+ try {
+ _("Sync with 1 engine specified.");
+ await Service.sync({ engines: ["steam"] });
+ deepEqual(syncedEngines, ["steam"]);
+ } finally {
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_bothEnginesSpecified() {
+ enableValidationPrefs();
+
+ _("Test: All engines are synced when specified in the correct order (1).");
+ let server = await setUp();
+
+ try {
+ _("Sync with both engines specified.");
+ await Service.sync({ engines: ["steam", "stirling"] });
+ deepEqual(syncedEngines, ["steam", "stirling"]);
+ } finally {
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_bothEnginesSpecifiedReverseOrder() {
+ enableValidationPrefs();
+
+ _("Test: All engines are synced when specified in the correct order (2).");
+ let server = await setUp();
+
+ try {
+ _("Sync with both engines specified.");
+ await Service.sync({ engines: ["stirling", "steam"] });
+ deepEqual(syncedEngines, ["stirling", "steam"]);
+ } finally {
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_bothEnginesDefault() {
+ enableValidationPrefs();
+
+ _("Test: All engines are synced when nothing is specified.");
+ let server = await setUp();
+
+ try {
+ await Service.sync();
+ deepEqual(syncedEngines, ["steam", "stirling"]);
+ } finally {
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
diff --git a/services/sync/tests/unit/test_service_sync_updateEnabledEngines.js b/services/sync/tests/unit/test_service_sync_updateEnabledEngines.js
new file mode 100644
index 0000000000..fd8b3f71bc
--- /dev/null
+++ b/services/sync/tests/unit/test_service_sync_updateEnabledEngines.js
@@ -0,0 +1,587 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+const { EngineSynchronizer } = ChromeUtils.importESModule(
+ "resource://services-sync/stages/enginesync.sys.mjs"
+);
+
+function QuietStore() {
+  Store.call(this, "Quiet");
+}
+QuietStore.prototype = {
+ async getAllIDs() {
+ return [];
+ },
+};
+
+function SteamEngine() {
+ SyncEngine.call(this, "Steam", Service);
+}
+SteamEngine.prototype = {
+  // We're not interested in the engine sync itself, only in what the service does.
+ _storeObj: QuietStore,
+
+ _sync: async function _sync() {
+ await this._syncStartup();
+ },
+};
+Object.setPrototypeOf(SteamEngine.prototype, SyncEngine.prototype);
+
+function StirlingEngine() {
+ SyncEngine.call(this, "Stirling", Service);
+}
+StirlingEngine.prototype = {
+ // This engine's enabled state is the same as the SteamEngine's.
+ get prefName() {
+ return "steam";
+ },
+};
+Object.setPrototypeOf(StirlingEngine.prototype, SteamEngine.prototype);
+
+// Tracking info/collections.
+var collectionsHelper = track_collections_helper();
+var upd = collectionsHelper.with_updated_collection;
+
+function sync_httpd_setup(handlers) {
+ handlers["/1.1/johndoe/info/collections"] = collectionsHelper.handler;
+ delete collectionsHelper.collections.crypto;
+ delete collectionsHelper.collections.meta;
+
+ let cr = new ServerWBO("keys");
+ handlers["/1.1/johndoe/storage/crypto/keys"] = upd("crypto", cr.handler());
+
+ let cl = new ServerCollection();
+ handlers["/1.1/johndoe/storage/clients"] = upd("clients", cl.handler());
+
+ return httpd_setup(handlers);
+}
+
+async function setUp(server) {
+ await SyncTestingInfrastructure(server, "johndoe", "ilovejane");
+ // Ensure that the server has valid keys so that logging in will work and not
+ // result in a server wipe, rendering many of these tests useless.
+ await generateNewKeys(Service.collectionKeys);
+ let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
+ await serverKeys.encrypt(Service.identity.syncKeyBundle);
+ let { success } = await serverKeys.upload(
+ Service.resource(Service.cryptoKeysURL)
+ );
+ ok(success);
+}
+
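+// Dummy payload stored in the steam/stirling server records.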
+const PAYLOAD = 42;
+
+add_task(async function setup() {
+ await Service.engineManager.clear();
+ validate_all_future_pings();
+
+ await Service.engineManager.register(SteamEngine);
+ await Service.engineManager.register(StirlingEngine);
+});
+
+add_task(async function test_newAccount() {
+ enableValidationPrefs();
+
+ _("Test: New account does not disable locally enabled engines.");
+ let engine = Service.engineManager.get("steam");
+ let server = sync_httpd_setup({
+ "/1.1/johndoe/storage/meta/global": new ServerWBO("global", {}).handler(),
+ "/1.1/johndoe/storage/steam": new ServerWBO("steam", {}).handler(),
+ });
+ await setUp(server);
+
+ try {
+ _("Engine is enabled from the beginning.");
+ Service._ignorePrefObserver = true;
+ engine.enabled = true;
+ Service._ignorePrefObserver = false;
+
+ _("Sync.");
+ await Service.sync();
+
+ _("Engine continues to be enabled.");
+ Assert.ok(engine.enabled);
+ } finally {
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_enabledLocally() {
+ enableValidationPrefs();
+
+ _("Test: Engine is disabled on remote clients and enabled locally");
+ Service.syncID = "abcdefghij";
+ let engine = Service.engineManager.get("steam");
+ let metaWBO = new ServerWBO("global", {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ engines: {},
+ });
+ let server = sync_httpd_setup({
+ "/1.1/johndoe/storage/meta/global": metaWBO.handler(),
+ "/1.1/johndoe/storage/steam": new ServerWBO("steam", {}).handler(),
+ });
+ await setUp(server);
+
+ try {
+ _("Enable engine locally.");
+ engine.enabled = true;
+
+ _("Sync.");
+ await Service.sync();
+
+ _("Meta record now contains the new engine.");
+ Assert.ok(!!metaWBO.data.engines.steam);
+
+ _("Engine continues to be enabled.");
+ Assert.ok(engine.enabled);
+ } finally {
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_disabledLocally() {
+ enableValidationPrefs();
+
+ _("Test: Engine is enabled on remote clients and disabled locally");
+ Service.syncID = "abcdefghij";
+ let engine = Service.engineManager.get("steam");
+ let syncID = await engine.resetLocalSyncID();
+ let metaWBO = new ServerWBO("global", {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ engines: { steam: { syncID, version: engine.version } },
+ });
+ let steamCollection = new ServerWBO("steam", PAYLOAD);
+
+ let server = sync_httpd_setup({
+ "/1.1/johndoe/storage/meta/global": metaWBO.handler(),
+ "/1.1/johndoe/storage/steam": steamCollection.handler(),
+ });
+ await setUp(server);
+
+ try {
+ _("Disable engine locally.");
+ Service._ignorePrefObserver = true;
+ engine.enabled = true;
+ Service._ignorePrefObserver = false;
+ engine.enabled = false;
+
+ _("Sync.");
+ await Service.sync();
+
+ _("Meta record no longer contains engine.");
+ Assert.ok(!metaWBO.data.engines.steam);
+
+ _("Server records are wiped.");
+ Assert.equal(steamCollection.payload, undefined);
+
+ _("Engine continues to be disabled.");
+ Assert.ok(!engine.enabled);
+ } finally {
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_disabledLocally_wipe503() {
+ enableValidationPrefs();
+
+ _("Test: Engine is enabled on remote clients and disabled locally");
+ Service.syncID = "abcdefghij";
+ let engine = Service.engineManager.get("steam");
+ let syncID = await engine.resetLocalSyncID();
+ let metaWBO = new ServerWBO("global", {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ engines: { steam: { syncID, version: engine.version } },
+ });
+
+ function service_unavailable(request, response) {
+ let body = "Service Unavailable";
+ response.setStatusLine(request.httpVersion, 503, "Service Unavailable");
+ response.setHeader("Retry-After", "23");
+ response.bodyOutputStream.write(body, body.length);
+ }
+
+ let server = sync_httpd_setup({
+ "/1.1/johndoe/storage/meta/global": metaWBO.handler(),
+ "/1.1/johndoe/storage/steam": service_unavailable,
+ });
+ await setUp(server);
+
+ _("Disable engine locally.");
+ Service._ignorePrefObserver = true;
+ engine.enabled = true;
+ Service._ignorePrefObserver = false;
+ engine.enabled = false;
+
+ _("Sync.");
+ await Service.sync();
+ Assert.equal(Service.status.sync, SERVER_MAINTENANCE);
+
+ await Service.startOver();
+ await promiseStopServer(server);
+});
+
+add_task(async function test_enabledRemotely() {
+ enableValidationPrefs();
+
+ _("Test: Engine is disabled locally and enabled on a remote client");
+ Service.syncID = "abcdefghij";
+ let engine = Service.engineManager.get("steam");
+ let syncID = await engine.resetLocalSyncID();
+ let metaWBO = new ServerWBO("global", {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ engines: { steam: { syncID, version: engine.version } },
+ });
+ let server = sync_httpd_setup({
+ "/1.1/johndoe/storage/meta/global": upd("meta", metaWBO.handler()),
+
+ "/1.1/johndoe/storage/steam": upd(
+ "steam",
+ new ServerWBO("steam", {}).handler()
+ ),
+ });
+ await setUp(server);
+
+ // We need to be very careful how we do this, so that we don't trigger a
+ // fresh start!
+ try {
+ _("Upload some keys to avoid a fresh start.");
+ let wbo = await Service.collectionKeys.generateNewKeysWBO();
+ await wbo.encrypt(Service.identity.syncKeyBundle);
+ Assert.equal(
+ 200,
+ (await wbo.upload(Service.resource(Service.cryptoKeysURL))).status
+ );
+
+ _("Engine is disabled.");
+ Assert.ok(!engine.enabled);
+
+ _("Sync.");
+ await Service.sync();
+
+ _("Engine is enabled.");
+ Assert.ok(engine.enabled);
+
+ _("Meta record still present.");
+ Assert.equal(metaWBO.data.engines.steam.syncID, await engine.getSyncID());
+ } finally {
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_disabledRemotelyTwoClients() {
+ enableValidationPrefs();
+
+ _(
+ "Test: Engine is enabled locally and disabled on a remote client... with two clients."
+ );
+ Service.syncID = "abcdefghij";
+ let engine = Service.engineManager.get("steam");
+ let metaWBO = new ServerWBO("global", {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ engines: {},
+ });
+ let server = sync_httpd_setup({
+ "/1.1/johndoe/storage/meta/global": upd("meta", metaWBO.handler()),
+
+ "/1.1/johndoe/storage/steam": upd(
+ "steam",
+ new ServerWBO("steam", {}).handler()
+ ),
+ });
+ await setUp(server);
+
+ try {
+ _("Enable engine locally.");
+ Service._ignorePrefObserver = true;
+ engine.enabled = true;
+ Service._ignorePrefObserver = false;
+
+ _("Sync.");
+ await Service.sync();
+
+ _("Disable engine by deleting from meta/global.");
+ let d = metaWBO.data;
+ delete d.engines.steam;
+ metaWBO.payload = JSON.stringify(d);
+ metaWBO.modified = Date.now() / 1000;
+
+ _("Add a second client and verify that the local pref is changed.");
+ Service.clientsEngine._store._remoteClients.foobar = {
+ name: "foobar",
+ type: "desktop",
+ };
+ await Service.sync();
+
+ _("Engine is disabled.");
+ Assert.ok(!engine.enabled);
+ } finally {
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_disabledRemotely() {
+ enableValidationPrefs();
+
+ _("Test: Engine is enabled locally and disabled on a remote client");
+ Service.syncID = "abcdefghij";
+ let engine = Service.engineManager.get("steam");
+ let metaWBO = new ServerWBO("global", {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ engines: {},
+ });
+ let server = sync_httpd_setup({
+ "/1.1/johndoe/storage/meta/global": metaWBO.handler(),
+ "/1.1/johndoe/storage/steam": new ServerWBO("steam", {}).handler(),
+ });
+ await setUp(server);
+
+ try {
+ _("Enable engine locally.");
+ Service._ignorePrefObserver = true;
+ engine.enabled = true;
+ Service._ignorePrefObserver = false;
+
+ _("Sync.");
+ await Service.sync();
+
+ _("Engine is not disabled: only one client.");
+ Assert.ok(engine.enabled);
+ } finally {
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_dependentEnginesEnabledLocally() {
+ enableValidationPrefs();
+
+ _("Test: Engine is disabled on remote clients and enabled locally");
+ Service.syncID = "abcdefghij";
+ let steamEngine = Service.engineManager.get("steam");
+ let stirlingEngine = Service.engineManager.get("stirling");
+ let metaWBO = new ServerWBO("global", {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ engines: {},
+ });
+ let server = sync_httpd_setup({
+ "/1.1/johndoe/storage/meta/global": metaWBO.handler(),
+ "/1.1/johndoe/storage/steam": new ServerWBO("steam", {}).handler(),
+ "/1.1/johndoe/storage/stirling": new ServerWBO("stirling", {}).handler(),
+ });
+ await setUp(server);
+
+ try {
+ _("Enable engine locally. Doing it on one is enough.");
+ steamEngine.enabled = true;
+
+ _("Sync.");
+ await Service.sync();
+
+ _("Meta record now contains the new engines.");
+ Assert.ok(!!metaWBO.data.engines.steam);
+ Assert.ok(!!metaWBO.data.engines.stirling);
+
+ _("Engines continue to be enabled.");
+ Assert.ok(steamEngine.enabled);
+ Assert.ok(stirlingEngine.enabled);
+ } finally {
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_dependentEnginesDisabledLocally() {
+ enableValidationPrefs();
+
+ _(
+ "Test: Two dependent engines are enabled on remote clients and disabled locally"
+ );
+ Service.syncID = "abcdefghij";
+ let steamEngine = Service.engineManager.get("steam");
+ let steamSyncID = await steamEngine.resetLocalSyncID();
+ let stirlingEngine = Service.engineManager.get("stirling");
+ let stirlingSyncID = await stirlingEngine.resetLocalSyncID();
+ let metaWBO = new ServerWBO("global", {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ engines: {
+ steam: { syncID: steamSyncID, version: steamEngine.version },
+ stirling: { syncID: stirlingSyncID, version: stirlingEngine.version },
+ },
+ });
+
+ let steamCollection = new ServerWBO("steam", PAYLOAD);
+ let stirlingCollection = new ServerWBO("stirling", PAYLOAD);
+
+ let server = sync_httpd_setup({
+ "/1.1/johndoe/storage/meta/global": metaWBO.handler(),
+ "/1.1/johndoe/storage/steam": steamCollection.handler(),
+ "/1.1/johndoe/storage/stirling": stirlingCollection.handler(),
+ });
+ await setUp(server);
+
+ try {
+ _("Disable engines locally. Doing it on one is enough.");
+ Service._ignorePrefObserver = true;
+ steamEngine.enabled = true;
+ Assert.ok(stirlingEngine.enabled);
+ Service._ignorePrefObserver = false;
+ steamEngine.enabled = false;
+ Assert.ok(!stirlingEngine.enabled);
+
+ _("Sync.");
+ await Service.sync();
+
+ _("Meta record no longer contains engines.");
+ Assert.ok(!metaWBO.data.engines.steam);
+ Assert.ok(!metaWBO.data.engines.stirling);
+
+ _("Server records are wiped.");
+ Assert.equal(steamCollection.payload, undefined);
+ Assert.equal(stirlingCollection.payload, undefined);
+
+ _("Engines continue to be disabled.");
+ Assert.ok(!steamEngine.enabled);
+ Assert.ok(!stirlingEngine.enabled);
+ } finally {
+ await Service.startOver();
+ await promiseStopServer(server);
+ }
+});
+
+add_task(async function test_service_updateLocalEnginesState() {
+ Service.syncID = "abcdefghij";
+ const engine = Service.engineManager.get("steam");
+ const metaWBO = new ServerWBO("global", {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ declined: ["steam"],
+ engines: {},
+ });
+ const server = httpd_setup({
+ "/1.1/johndoe/storage/meta/global": metaWBO.handler(),
+ });
+ await SyncTestingInfrastructure(server, "johndoe");
+
+ // Disconnect sync.
+ await Service.startOver();
+ Service._ignorePrefObserver = true;
+ // Steam engine is enabled on our machine.
+ engine.enabled = true;
+ Service._ignorePrefObserver = false;
+ Service.identity._findCluster = () => server.baseURI + "/1.1/johndoe/";
+
+ // Update engine state from the server.
+ await Service.updateLocalEnginesState();
+ // Now disabled.
+ Assert.ok(!engine.enabled);
+});
+
+add_task(async function test_service_enableAfterUpdateState() {
+ Service.syncID = "abcdefghij";
+ const engine = Service.engineManager.get("steam");
+ const metaWBO = new ServerWBO("global", {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ declined: ["steam"],
+ engines: { someengine: {} },
+ });
+ const server = httpd_setup({
+ "/1.1/johndoe/storage/meta/global": metaWBO.handler(),
+ });
+ await SyncTestingInfrastructure(server, "johndoe");
+
+ // Disconnect sync.
+ await Service.startOver();
+ Service.identity._findCluster = () => server.baseURI + "/1.1/johndoe/";
+
+ // Update engine state from the server.
+ await Service.updateLocalEnginesState();
+ // Now disabled, reflecting what's on the server.
+ Assert.ok(!engine.enabled);
+  // Enable the engine, as though the user selected it via CWTS (Choose What To Sync).
+ engine.enabled = true;
+
+ // Do the "reconcile local and remote states" dance.
+ let engineSync = new EngineSynchronizer(Service);
+ await engineSync._updateEnabledEngines();
+ await Service._maybeUpdateDeclined();
+ // engine should remain enabled.
+ Assert.ok(engine.enabled);
+ // engine should no longer appear in declined on the server.
+ Assert.deepEqual(metaWBO.data.declined, []);
+});
+
+add_task(async function test_service_disableAfterUpdateState() {
+ Service.syncID = "abcdefghij";
+ const engine = Service.engineManager.get("steam");
+ const metaWBO = new ServerWBO("global", {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ declined: [],
+ engines: { steam: {} },
+ });
+ const server = httpd_setup({
+ "/1.1/johndoe/storage/meta/global": metaWBO.handler(),
+ });
+ await SyncTestingInfrastructure(server, "johndoe");
+
+ // Disconnect sync.
+ await Service.startOver();
+ Service.identity._findCluster = () => server.baseURI + "/1.1/johndoe/";
+
+ // Update engine state from the server.
+ await Service.updateLocalEnginesState();
+ // Now enabled, reflecting what's on the server.
+ Assert.ok(engine.enabled);
+ // Disable the engine, as though via CWTS.
+ engine.enabled = false;
+
+ // Do the "reconcile local and remote states" dance.
+ let engineSync = new EngineSynchronizer(Service);
+ await engineSync._updateEnabledEngines();
+ await Service._maybeUpdateDeclined();
+ // engine should remain disabled.
+ Assert.ok(!engine.enabled);
+ // engine should now appear in declined on the server.
+ Assert.deepEqual(metaWBO.data.declined, ["steam"]);
+ // and should have been removed from engines.
+ Assert.deepEqual(metaWBO.data.engines, {});
+});
+
+add_task(async function test_service_updateLocalEnginesState_no_meta_global() {
+ Service.syncID = "abcdefghij";
+ const engine = Service.engineManager.get("steam");
+ // The server doesn't contain /meta/global (sync was never enabled).
+ const server = httpd_setup({});
+ await SyncTestingInfrastructure(server, "johndoe");
+
+ // Disconnect sync.
+ await Service.startOver();
+ Service._ignorePrefObserver = true;
+ // Steam engine is enabled on our machine.
+ engine.enabled = true;
+ Service._ignorePrefObserver = false;
+ Service.identity._findCluster = () => server.baseURI + "/1.1/johndoe/";
+
+ // Update engine state from the server.
+ await Service.updateLocalEnginesState();
+ // Still enabled.
+ Assert.ok(engine.enabled);
+});
diff --git a/services/sync/tests/unit/test_service_verifyLogin.js b/services/sync/tests/unit/test_service_verifyLogin.js
new file mode 100644
index 0000000000..b99b5c692c
--- /dev/null
+++ b/services/sync/tests/unit/test_service_verifyLogin.js
@@ -0,0 +1,118 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
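+// Respond only to Hawk-authorized requests; otherwise return a 401.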
+function login_handling(handler) {
+ return function (request, response) {
+ if (has_hawk_header(request)) {
+ handler(request, response);
+ } else {
+ let body = "Unauthorized";
+ response.setStatusLine(request.httpVersion, 401, "Unauthorized");
+ response.bodyOutputStream.write(body, body.length);
+ }
+ };
+}
+
+function service_unavailable(request, response) {
+ let body = "Service Unavailable";
+ response.setStatusLine(request.httpVersion, 503, "Service Unavailable");
+ response.setHeader("Retry-After", "42");
+ response.bodyOutputStream.write(body, body.length);
+}
+
+function run_test() {
+ Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+
+ run_next_test();
+}
+
+add_task(async function test_verifyLogin() {
+ // This test expects a clean slate -- no saved passphrase.
+ Services.logins.removeAllUserFacingLogins();
+ let johnHelper = track_collections_helper();
+ let johnU = johnHelper.with_updated_collection;
+
+ do_test_pending();
+
+ let server = httpd_setup({
+ "/1.1/johndoe/info/collections": login_handling(johnHelper.handler),
+ "/1.1/janedoe/info/collections": service_unavailable,
+
+ "/1.1/johndoe/storage/crypto/keys": johnU(
+ "crypto",
+ new ServerWBO("keys").handler()
+ ),
+ "/1.1/johndoe/storage/meta/global": johnU(
+ "meta",
+ new ServerWBO("global").handler()
+ ),
+ });
+
+ try {
+ _("Force the initial state.");
+ Service.status.service = STATUS_OK;
+ Assert.equal(Service.status.service, STATUS_OK);
+
+ _("Credentials won't check out because we're not configured yet.");
+ Service.status.resetSync();
+ Assert.equal(false, await Service.verifyLogin());
+ Assert.equal(Service.status.service, CLIENT_NOT_CONFIGURED);
+ Assert.equal(Service.status.login, LOGIN_FAILED_NO_USERNAME);
+
+ _("Success if syncBundleKey is set.");
+ Service.status.resetSync();
+ await configureIdentity({ username: "johndoe" }, server);
+ Assert.ok(await Service.verifyLogin());
+ Assert.equal(Service.status.service, STATUS_OK);
+ Assert.equal(Service.status.login, LOGIN_SUCCEEDED);
+
+ _(
+ "If verifyLogin() encounters a server error, it flips on the backoff flag and notifies observers on a 503 with Retry-After."
+ );
+ Service.status.resetSync();
+ await configureIdentity({ username: "janedoe" }, server);
+ Service._updateCachedURLs();
+ Assert.ok(!Service.status.enforceBackoff);
+ let backoffInterval;
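+    // The backoff observer receives the Retry-After value (42 seconds, per
+    // the service_unavailable handler above) as its subject.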
+ Svc.Obs.add(
+ "weave:service:backoff:interval",
+ function observe(subject, data) {
+ Svc.Obs.remove("weave:service:backoff:interval", observe);
+ backoffInterval = subject;
+ }
+ );
+ Assert.equal(false, await Service.verifyLogin());
+ Assert.ok(Service.status.enforceBackoff);
+ Assert.equal(backoffInterval, 42);
+ Assert.equal(Service.status.service, LOGIN_FAILED);
+ Assert.equal(Service.status.login, SERVER_MAINTENANCE);
+
+ _(
+ "Ensure a network error when finding the cluster sets the right Status bits."
+ );
+ Service.status.resetSync();
+ Service.clusterURL = "";
+ Service.identity._findCluster = () => "http://localhost:12345/";
+ Assert.equal(false, await Service.verifyLogin());
+ Assert.equal(Service.status.service, LOGIN_FAILED);
+ Assert.equal(Service.status.login, LOGIN_FAILED_NETWORK_ERROR);
+
+ _(
+ "Ensure a network error when getting the collection info sets the right Status bits."
+ );
+ Service.status.resetSync();
+ Service.clusterURL = "http://localhost:12345/";
+ Assert.equal(false, await Service.verifyLogin());
+ Assert.equal(Service.status.service, LOGIN_FAILED);
+ Assert.equal(Service.status.login, LOGIN_FAILED_NETWORK_ERROR);
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ server.stop(do_test_finished);
+ }
+});
diff --git a/services/sync/tests/unit/test_service_wipeClient.js b/services/sync/tests/unit/test_service_wipeClient.js
new file mode 100644
index 0000000000..aa48868ca0
--- /dev/null
+++ b/services/sync/tests/unit/test_service_wipeClient.js
@@ -0,0 +1,78 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+function CanDecryptEngine() {
+ SyncEngine.call(this, "CanDecrypt", Service);
+}
+CanDecryptEngine.prototype = {
+ // Override these methods with mocks for the test
+ async canDecrypt() {
+ return true;
+ },
+
+ wasWiped: false,
+ async wipeClient() {
+ this.wasWiped = true;
+ },
+};
+Object.setPrototypeOf(CanDecryptEngine.prototype, SyncEngine.prototype);
+
+function CannotDecryptEngine() {
+ SyncEngine.call(this, "CannotDecrypt", Service);
+}
+CannotDecryptEngine.prototype = {
+ // Override these methods with mocks for the test
+ async canDecrypt() {
+ return false;
+ },
+
+ wasWiped: false,
+ async wipeClient() {
+ this.wasWiped = true;
+ },
+};
+Object.setPrototypeOf(CannotDecryptEngine.prototype, SyncEngine.prototype);
+
+let canDecryptEngine;
+let cannotDecryptEngine;
+
+add_task(async function setup() {
+ await Service.engineManager.clear();
+
+ await Service.engineManager.register(CanDecryptEngine);
+ await Service.engineManager.register(CannotDecryptEngine);
+ canDecryptEngine = Service.engineManager.get("candecrypt");
+ cannotDecryptEngine = Service.engineManager.get("cannotdecrypt");
+});
+
+add_task(async function test_withEngineList() {
+ try {
+ _("Ensure initial scenario.");
+ Assert.ok(!canDecryptEngine.wasWiped);
+ Assert.ok(!cannotDecryptEngine.wasWiped);
+
+ _("Wipe local engine data.");
+ await Service.wipeClient(["candecrypt", "cannotdecrypt"]);
+
+ _("Ensure only the engine that can decrypt was wiped.");
+ Assert.ok(canDecryptEngine.wasWiped);
+ Assert.ok(!cannotDecryptEngine.wasWiped);
+ } finally {
+ canDecryptEngine.wasWiped = false;
+ cannotDecryptEngine.wasWiped = false;
+ await Service.startOver();
+ }
+});
+
+add_task(async function test_startOver_clears_keys() {
+ syncTestLogging();
+ await generateNewKeys(Service.collectionKeys);
+ Assert.ok(!!Service.collectionKeys.keyForCollection());
+ await Service.startOver();
+ syncTestLogging();
+ Assert.ok(!Service.collectionKeys.keyForCollection());
+});
diff --git a/services/sync/tests/unit/test_service_wipeServer.js b/services/sync/tests/unit/test_service_wipeServer.js
new file mode 100644
index 0000000000..9fc2592aa8
--- /dev/null
+++ b/services/sync/tests/unit/test_service_wipeServer.js
@@ -0,0 +1,240 @@
+Svc.PrefBranch.setStringPref("registerEngines", "");
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+// configure the identity we use for this test.
+const identityConfig = makeIdentityConfig({ username: "johndoe" });
+
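+// A minimal collection endpoint: it records whether it saw a DELETE and echoes
+// the timestamp it generated via the X-Weave-Timestamp header.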
+function FakeCollection() {
+ this.deleted = false;
+}
+FakeCollection.prototype = {
+ handler() {
+ let self = this;
+ return function (request, response) {
+ let body = "";
+ self.timestamp = new_timestamp();
+ let timestamp = "" + self.timestamp;
+ if (request.method == "DELETE") {
+ body = timestamp;
+ self.deleted = true;
+ }
+ response.setHeader("X-Weave-Timestamp", timestamp);
+ response.setStatusLine(request.httpVersion, 200, "OK");
+ response.bodyOutputStream.write(body, body.length);
+ };
+ },
+};
+
+async function setUpTestFixtures(server) {
+ Service.clusterURL = server.baseURI + "/";
+
+ await configureIdentity(identityConfig);
+}
+
+add_task(async function test_wipeServer_list_success() {
+ _("Service.wipeServer() deletes collections given as argument.");
+
+ let steam_coll = new FakeCollection();
+ let diesel_coll = new FakeCollection();
+
+ let server = httpd_setup({
+ "/1.1/johndoe/storage/steam": steam_coll.handler(),
+ "/1.1/johndoe/storage/diesel": diesel_coll.handler(),
+ "/1.1/johndoe/storage/petrol": httpd_handler(404, "Not Found"),
+ });
+
+ try {
+ await setUpTestFixtures(server);
+ await SyncTestingInfrastructure(server, "johndoe", "irrelevant");
+
+ _("Confirm initial environment.");
+ Assert.ok(!steam_coll.deleted);
+ Assert.ok(!diesel_coll.deleted);
+
+ _(
+ "wipeServer() will happily ignore the non-existent collection and use the timestamp of the last DELETE that was successful."
+ );
+ let timestamp = await Service.wipeServer(["steam", "diesel", "petrol"]);
+ Assert.equal(timestamp, diesel_coll.timestamp);
+
+    _(
+      "Both 'steam' and 'diesel' were deleted; the 404 from 'petrol' was ignored rather than aborting the wipe."
+    );
+ Assert.ok(steam_coll.deleted);
+ Assert.ok(diesel_coll.deleted);
+ } finally {
+ await promiseStopServer(server);
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ }
+});
+
+add_task(async function test_wipeServer_list_503() {
+ _("Service.wipeServer() deletes collections given as argument.");
+
+ let steam_coll = new FakeCollection();
+ let diesel_coll = new FakeCollection();
+
+ let server = httpd_setup({
+ "/1.1/johndoe/storage/steam": steam_coll.handler(),
+ "/1.1/johndoe/storage/petrol": httpd_handler(503, "Service Unavailable"),
+ "/1.1/johndoe/storage/diesel": diesel_coll.handler(),
+ });
+
+ try {
+ await setUpTestFixtures(server);
+ await SyncTestingInfrastructure(server, "johndoe", "irrelevant");
+
+ _("Confirm initial environment.");
+ Assert.ok(!steam_coll.deleted);
+ Assert.ok(!diesel_coll.deleted);
+
+ _(
+ "wipeServer() will happily ignore the non-existent collection, delete the 'steam' collection and abort after an receiving an error on the 'petrol' collection."
+ );
+ let error;
+ try {
+ await Service.wipeServer(["non-existent", "steam", "petrol", "diesel"]);
+ do_throw("Should have thrown!");
+ } catch (ex) {
+ error = ex;
+ }
+ _("wipeServer() threw this exception: " + error);
+ Assert.equal(error.status, 503);
+
+ _(
+ "wipeServer stopped deleting after encountering an error with the 'petrol' collection, thus only 'steam' has been deleted."
+ );
+ Assert.ok(steam_coll.deleted);
+ Assert.ok(!diesel_coll.deleted);
+ } finally {
+ await promiseStopServer(server);
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ }
+});
+
+add_task(async function test_wipeServer_all_success() {
+ _("Service.wipeServer() deletes all the things.");
+
+ /**
+ * Handle the bulk DELETE request sent by wipeServer.
+ */
+ let deleted = false;
+ let serverTimestamp;
+ function storageHandler(request, response) {
+ Assert.equal("DELETE", request.method);
+ Assert.ok(request.hasHeader("X-Confirm-Delete"));
+ deleted = true;
+ serverTimestamp = return_timestamp(request, response);
+ }
+
+ let server = httpd_setup({
+ "/1.1/johndoe/storage": storageHandler,
+ });
+ await setUpTestFixtures(server);
+
+ _("Try deletion.");
+ await SyncTestingInfrastructure(server, "johndoe", "irrelevant");
+ let returnedTimestamp = await Service.wipeServer();
+ Assert.ok(deleted);
+ Assert.equal(returnedTimestamp, serverTimestamp);
+
+ await promiseStopServer(server);
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+});
+
+add_task(async function test_wipeServer_all_404() {
+ _("Service.wipeServer() accepts a 404.");
+
+ /**
+ * Handle the bulk DELETE request sent by wipeServer. Returns a 404.
+ */
+ let deleted = false;
+ let serverTimestamp;
+ function storageHandler(request, response) {
+ Assert.equal("DELETE", request.method);
+ Assert.ok(request.hasHeader("X-Confirm-Delete"));
+ deleted = true;
+ serverTimestamp = new_timestamp();
+ response.setHeader("X-Weave-Timestamp", "" + serverTimestamp);
+ response.setStatusLine(request.httpVersion, 404, "Not Found");
+ }
+
+ let server = httpd_setup({
+ "/1.1/johndoe/storage": storageHandler,
+ });
+ await setUpTestFixtures(server);
+
+ _("Try deletion.");
+ await SyncTestingInfrastructure(server, "johndoe", "irrelevant");
+ let returnedTimestamp = await Service.wipeServer();
+ Assert.ok(deleted);
+ Assert.equal(returnedTimestamp, serverTimestamp);
+
+ await promiseStopServer(server);
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+});
+
+add_task(async function test_wipeServer_all_503() {
+ _("Service.wipeServer() throws if it encounters a non-200/404 response.");
+
+ /**
+ * Handle the bulk DELETE request sent by wipeServer. Returns a 503.
+ */
+ function storageHandler(request, response) {
+ Assert.equal("DELETE", request.method);
+ Assert.ok(request.hasHeader("X-Confirm-Delete"));
+ response.setStatusLine(request.httpVersion, 503, "Service Unavailable");
+ }
+
+ let server = httpd_setup({
+ "/1.1/johndoe/storage": storageHandler,
+ });
+ await setUpTestFixtures(server);
+
+ _("Try deletion.");
+ let error;
+ try {
+ await SyncTestingInfrastructure(server, "johndoe", "irrelevant");
+ await Service.wipeServer();
+ do_throw("Should have thrown!");
+ } catch (ex) {
+ error = ex;
+ }
+ Assert.equal(error.status, 503);
+
+ await promiseStopServer(server);
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+});
+
+add_task(async function test_wipeServer_all_connectionRefused() {
+ _("Service.wipeServer() throws if it encounters a network problem.");
+ let server = httpd_setup({});
+ await setUpTestFixtures(server);
+
+ Service.clusterURL = "http://localhost:4352/";
+
+ _("Try deletion.");
+ try {
+ await Service.wipeServer();
+ do_throw("Should have thrown!");
+ } catch (ex) {
+ Assert.equal(ex.result, Cr.NS_ERROR_CONNECTION_REFUSED);
+ }
+
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ await promiseStopServer(server);
+});
diff --git a/services/sync/tests/unit/test_status.js b/services/sync/tests/unit/test_status.js
new file mode 100644
index 0000000000..5bcfa182c3
--- /dev/null
+++ b/services/sync/tests/unit/test_status.js
@@ -0,0 +1,83 @@
+const { Status } = ChromeUtils.importESModule(
+ "resource://services-sync/status.sys.mjs"
+);
+
+function run_test() {
+ // Check initial states
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(Status.backoffInterval, 0);
+ Assert.equal(Status.minimumNextSync, 0);
+
+ Assert.equal(Status.service, STATUS_OK);
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+ Assert.equal(Status.login, LOGIN_SUCCEEDED);
+  if (Object.keys(Status.engines).length) {
+    do_throw("Status.engines should be empty.");
+  }
+ Assert.equal(Status.partial, false);
+
+ // Check login status
+ for (let code of [LOGIN_FAILED_NO_USERNAME, LOGIN_FAILED_NO_PASSPHRASE]) {
+ Status.login = code;
+ Assert.equal(Status.login, code);
+ Assert.equal(Status.service, CLIENT_NOT_CONFIGURED);
+ Status.resetSync();
+ }
+
+ Status.login = LOGIN_FAILED;
+ Assert.equal(Status.login, LOGIN_FAILED);
+ Assert.equal(Status.service, LOGIN_FAILED);
+ Status.resetSync();
+
+ Status.login = LOGIN_SUCCEEDED;
+ Assert.equal(Status.login, LOGIN_SUCCEEDED);
+ Assert.equal(Status.service, STATUS_OK);
+ Status.resetSync();
+
+ // Check sync status
+ Status.sync = SYNC_FAILED;
+ Assert.equal(Status.sync, SYNC_FAILED);
+ Assert.equal(Status.service, SYNC_FAILED);
+
+ Status.sync = SYNC_SUCCEEDED;
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+ Assert.equal(Status.service, STATUS_OK);
+
+ Status.resetSync();
+
+ // Check engine status
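+  // Note: the engines setter takes an [engineName, status] pair and records
+  // it in the Status.engines map.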
+ Status.engines = ["testEng1", ENGINE_SUCCEEDED];
+ Assert.equal(Status.engines.testEng1, ENGINE_SUCCEEDED);
+ Assert.equal(Status.service, STATUS_OK);
+
+ Status.engines = ["testEng2", ENGINE_DOWNLOAD_FAIL];
+ Assert.equal(Status.engines.testEng1, ENGINE_SUCCEEDED);
+ Assert.equal(Status.engines.testEng2, ENGINE_DOWNLOAD_FAIL);
+ Assert.equal(Status.service, SYNC_FAILED_PARTIAL);
+
+ Status.engines = ["testEng3", ENGINE_SUCCEEDED];
+ Assert.equal(Status.engines.testEng1, ENGINE_SUCCEEDED);
+ Assert.equal(Status.engines.testEng2, ENGINE_DOWNLOAD_FAIL);
+ Assert.equal(Status.engines.testEng3, ENGINE_SUCCEEDED);
+ Assert.equal(Status.service, SYNC_FAILED_PARTIAL);
+
+ // Check resetSync
+ Status.sync = SYNC_FAILED;
+ Status.resetSync();
+
+ Assert.equal(Status.service, STATUS_OK);
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+  if (Object.keys(Status.engines).length) {
+    do_throw("Status.engines should be empty.");
+  }
+
+ // Check resetBackoff
+ Status.enforceBackoff = true;
+  Status.backoffInterval = 42;
+ Status.resetBackoff();
+
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(Status.backoffInterval, 0);
+ Assert.equal(Status.minimumNextSync, 0);
+}
diff --git a/services/sync/tests/unit/test_status_checkSetup.js b/services/sync/tests/unit/test_status_checkSetup.js
new file mode 100644
index 0000000000..fe3e7cad8d
--- /dev/null
+++ b/services/sync/tests/unit/test_status_checkSetup.js
@@ -0,0 +1,26 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Status } = ChromeUtils.importESModule(
+ "resource://services-sync/status.sys.mjs"
+);
+
+add_task(async function test_status_checkSetup() {
+ try {
+ _("Fresh setup, we're not configured.");
+ Assert.equal(Status.checkSetup(), CLIENT_NOT_CONFIGURED);
+ Assert.equal(Status.login, LOGIN_FAILED_NO_USERNAME);
+ Status.resetSync();
+
+ _("Let's provide the syncKeyBundle");
+ await configureIdentity();
+
+ _("checkSetup()");
+ Assert.equal(Status.checkSetup(), STATUS_OK);
+ Status.resetSync();
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ }
+});
diff --git a/services/sync/tests/unit/test_sync_auth_manager.js b/services/sync/tests/unit/test_sync_auth_manager.js
new file mode 100644
index 0000000000..9af40d26c6
--- /dev/null
+++ b/services/sync/tests/unit/test_sync_auth_manager.js
@@ -0,0 +1,1027 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { AuthenticationError, SyncAuthManager } = ChromeUtils.importESModule(
+ "resource://services-sync/sync_auth.sys.mjs"
+);
+const { Resource } = ChromeUtils.importESModule(
+ "resource://services-sync/resource.sys.mjs"
+);
+const { initializeIdentityWithTokenServerResponse } =
+ ChromeUtils.importESModule(
+ "resource://testing-common/services/sync/fxa_utils.sys.mjs"
+ );
+const { HawkClient } = ChromeUtils.importESModule(
+ "resource://services-common/hawkclient.sys.mjs"
+);
+const { FxAccounts } = ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+);
+const { FxAccountsClient } = ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccountsClient.sys.mjs"
+);
+const {
+ ERRNO_INVALID_AUTH_TOKEN,
+ ONLOGIN_NOTIFICATION,
+ ONVERIFIED_NOTIFICATION,
+} = ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccountsCommon.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { Status } = ChromeUtils.importESModule(
+ "resource://services-sync/status.sys.mjs"
+);
+const { TokenServerClient, TokenServerClientServerError } =
+ ChromeUtils.importESModule(
+ "resource://services-common/tokenserverclient.sys.mjs"
+ );
+const { AccountState } = ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+);
+
+const SECOND_MS = 1000;
+const MINUTE_MS = SECOND_MS * 60;
+const HOUR_MS = MINUTE_MS * 60;
+
+const MOCK_ACCESS_TOKEN =
+ "e3c5caf17f27a0d9e351926a928938b3737df43e91d4992a5a5fca9a7bdef8ba";
+
+var globalIdentityConfig = makeIdentityConfig();
+var globalSyncAuthManager = new SyncAuthManager();
+configureFxAccountIdentity(globalSyncAuthManager, globalIdentityConfig);
+
+/**
+ * Mock client clock and skew vs server in FxAccounts signed-in user module and
+ * API client. sync_auth.js queries these values to construct HAWK
+ * headers. We will use this to test clock skew compensation in these headers
+ * below.
+ */
+var MockFxAccountsClient = function () {
+ FxAccountsClient.apply(this);
+};
+MockFxAccountsClient.prototype = {
+ accountStatus() {
+ return Promise.resolve(true);
+ },
+ getScopedKeyData() {
+ return Promise.resolve({
+ "https://identity.mozilla.com/apps/oldsync": {
+ identifier: "https://identity.mozilla.com/apps/oldsync",
+ keyRotationSecret:
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ keyRotationTimestamp: 1234567890123,
+ },
+ });
+ },
+};
+Object.setPrototypeOf(
+ MockFxAccountsClient.prototype,
+ FxAccountsClient.prototype
+);
+
+add_test(function test_initial_state() {
+ _("Verify initial state");
+ Assert.ok(!globalSyncAuthManager._token);
+ Assert.ok(!globalSyncAuthManager._hasValidToken());
+ run_next_test();
+});
+
+add_task(async function test_initialize() {
+ _("Verify start after fetching token");
+ await globalSyncAuthManager._ensureValidToken();
+ Assert.ok(!!globalSyncAuthManager._token);
+ Assert.ok(globalSyncAuthManager._hasValidToken());
+});
+
+add_task(async function test_refreshOAuthTokenOn401() {
+ _("Refreshes the FXA OAuth token after a 401.");
+ let getTokenCount = 0;
+ let syncAuthManager = new SyncAuthManager();
+ let identityConfig = makeIdentityConfig();
+ let fxaInternal = makeFxAccountsInternalMock(identityConfig);
+ configureFxAccountIdentity(syncAuthManager, identityConfig, fxaInternal);
+ syncAuthManager._fxaService._internal.initialize();
+ syncAuthManager._fxaService.getOAuthToken = () => {
+ ++getTokenCount;
+ return Promise.resolve(MOCK_ACCESS_TOKEN);
+ };
+
+ let didReturn401 = false;
+ let didReturn200 = false;
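+  // The mock token server 401s the first fetch, forcing an OAuth token
+  // refresh; the retry with the new token then succeeds.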
+ let mockTSC = mockTokenServer(() => {
+ if (getTokenCount <= 1) {
+ didReturn401 = true;
+ return {
+ status: 401,
+ headers: { "content-type": "application/json" },
+ body: JSON.stringify({}),
+ };
+ }
+ didReturn200 = true;
+ return {
+ status: 200,
+ headers: { "content-type": "application/json" },
+ body: JSON.stringify({
+ id: "id",
+ key: "key",
+ api_endpoint: "http://example.com/",
+ uid: "uid",
+ duration: 300,
+ }),
+ };
+ });
+
+ syncAuthManager._tokenServerClient = mockTSC;
+
+ await syncAuthManager._ensureValidToken();
+
+ Assert.equal(getTokenCount, 2);
+ Assert.ok(didReturn401);
+ Assert.ok(didReturn200);
+ Assert.ok(syncAuthManager._token);
+ Assert.ok(syncAuthManager._hasValidToken());
+});
+
+add_task(async function test_initializeWithAuthErrorAndDeletedAccount() {
+ _("Verify sync state with auth error + account deleted");
+
+ var identityConfig = makeIdentityConfig();
+ var syncAuthManager = new SyncAuthManager();
+
+ // Use the real `getOAuthToken` method that calls
+ // `mockFxAClient.accessTokenWithSessionToken`.
+ let fxaInternal = makeFxAccountsInternalMock(identityConfig);
+ delete fxaInternal.getOAuthToken;
+
+ configureFxAccountIdentity(syncAuthManager, identityConfig, fxaInternal);
+ syncAuthManager._fxaService._internal.initialize();
+
+ let accessTokenWithSessionTokenCalled = false;
+ let accountStatusCalled = false;
+ let sessionStatusCalled = false;
+
+ let AuthErrorMockFxAClient = function () {
+ FxAccountsClient.apply(this);
+ };
+ AuthErrorMockFxAClient.prototype = {
+ accessTokenWithSessionToken() {
+ accessTokenWithSessionTokenCalled = true;
+ return Promise.reject({
+ code: 401,
+ errno: ERRNO_INVALID_AUTH_TOKEN,
+ });
+ },
+ accountStatus() {
+ accountStatusCalled = true;
+ return Promise.resolve(false);
+ },
+ sessionStatus() {
+ sessionStatusCalled = true;
+ return Promise.resolve(false);
+ },
+ };
+ Object.setPrototypeOf(
+ AuthErrorMockFxAClient.prototype,
+ FxAccountsClient.prototype
+ );
+
+ let mockFxAClient = new AuthErrorMockFxAClient();
+ syncAuthManager._fxaService._internal._fxAccountsClient = mockFxAClient;
+
+ await Assert.rejects(
+ syncAuthManager._ensureValidToken(),
+ AuthenticationError,
+ "should reject due to an auth error"
+ );
+
+ Assert.ok(accessTokenWithSessionTokenCalled);
+ Assert.ok(sessionStatusCalled);
+ Assert.ok(accountStatusCalled);
+ Assert.ok(!syncAuthManager._token);
+ Assert.ok(!syncAuthManager._hasValidToken());
+});
+
+add_task(async function test_getResourceAuthenticator() {
+ _(
+ "SyncAuthManager supplies a Resource Authenticator callback which returns a Hawk header."
+ );
+ configureFxAccountIdentity(globalSyncAuthManager);
+ let authenticator = globalSyncAuthManager.getResourceAuthenticator();
+ Assert.ok(!!authenticator);
+ let req = {
+ uri: CommonUtils.makeURI("https://example.net/somewhere/over/the/rainbow"),
+ method: "GET",
+ };
+ let output = await authenticator(req, "GET");
+ Assert.ok("headers" in output);
+ Assert.ok("authorization" in output.headers);
+ Assert.ok(output.headers.authorization.startsWith("Hawk"));
+ _("Expected internal state after successful call.");
+ Assert.equal(
+ globalSyncAuthManager._token.uid,
+ globalIdentityConfig.fxaccount.token.uid
+ );
+});
+
+add_task(async function test_resourceAuthenticatorSkew() {
+ _(
+ "SyncAuthManager Resource Authenticator compensates for clock skew in Hawk header."
+ );
+
+ // Clock is skewed 12 hours into the future
+ // We pick a date in the past so we don't risk concealing bugs in code that
+ // uses new Date() instead of our given date.
+ let now =
+ new Date("Fri Apr 09 2004 00:00:00 GMT-0700").valueOf() + 12 * HOUR_MS;
+ let syncAuthManager = new SyncAuthManager();
+ let hawkClient = new HawkClient("https://example.net/v1", "/foo");
+
+ // mock fxa hawk client skew
+ hawkClient.now = function () {
+ dump("mocked client now: " + now + "\n");
+ return now;
+ };
+ // Imagine there's already been one fxa request and the hawk client has
+ // already detected skew vs the fxa auth server.
+ let localtimeOffsetMsec = -1 * 12 * HOUR_MS;
+ hawkClient._localtimeOffsetMsec = localtimeOffsetMsec;
+
+ let fxaClient = new MockFxAccountsClient();
+ fxaClient.hawk = hawkClient;
+
+ // Sanity check
+ Assert.equal(hawkClient.now(), now);
+ Assert.equal(hawkClient.localtimeOffsetMsec, localtimeOffsetMsec);
+
+ // Properly picked up by the client
+ Assert.equal(fxaClient.now(), now);
+ Assert.equal(fxaClient.localtimeOffsetMsec, localtimeOffsetMsec);
+
+ let identityConfig = makeIdentityConfig();
+ let fxaInternal = makeFxAccountsInternalMock(identityConfig);
+ fxaInternal._now_is = now;
+ fxaInternal.fxAccountsClient = fxaClient;
+
+ // Mocks within mocks...
+ configureFxAccountIdentity(
+ syncAuthManager,
+ globalIdentityConfig,
+ fxaInternal
+ );
+
+ Assert.equal(syncAuthManager._fxaService._internal.now(), now);
+ Assert.equal(
+ syncAuthManager._fxaService._internal.localtimeOffsetMsec,
+ localtimeOffsetMsec
+ );
+
+ Assert.equal(syncAuthManager._fxaService._internal.now(), now);
+ Assert.equal(
+ syncAuthManager._fxaService._internal.localtimeOffsetMsec,
+ localtimeOffsetMsec
+ );
+
+ let request = new Resource("https://example.net/i/like/pie/");
+ let authenticator = syncAuthManager.getResourceAuthenticator();
+ let output = await authenticator(request, "GET");
+ dump("output" + JSON.stringify(output));
+ let authHeader = output.headers.authorization;
+ Assert.ok(authHeader.startsWith("Hawk"));
+
+ // Skew correction is applied in the header and we're within the two-minute
+ // window.
+ Assert.equal(getTimestamp(authHeader), now - 12 * HOUR_MS);
+ Assert.ok(getTimestampDelta(authHeader, now) - 12 * HOUR_MS < 2 * MINUTE_MS);
+});
+
+add_task(async function test_RESTResourceAuthenticatorSkew() {
+ _(
+ "SyncAuthManager REST Resource Authenticator compensates for clock skew in Hawk header."
+ );
+
+  // Clock is skewed 12 hours into the future from our arbitrary date.
+ let now =
+ new Date("Fri Apr 09 2004 00:00:00 GMT-0700").valueOf() + 12 * HOUR_MS;
+ let syncAuthManager = new SyncAuthManager();
+ let hawkClient = new HawkClient("https://example.net/v1", "/foo");
+
+ // mock fxa hawk client skew
+ hawkClient.now = function () {
+ return now;
+ };
+ // Imagine there's already been one fxa request and the hawk client has
+ // already detected skew vs the fxa auth server.
+ hawkClient._localtimeOffsetMsec = -1 * 12 * HOUR_MS;
+
+ let fxaClient = new MockFxAccountsClient();
+ fxaClient.hawk = hawkClient;
+
+ let identityConfig = makeIdentityConfig();
+ let fxaInternal = makeFxAccountsInternalMock(identityConfig);
+ fxaInternal._now_is = now;
+ fxaInternal.fxAccountsClient = fxaClient;
+
+ configureFxAccountIdentity(
+ syncAuthManager,
+ globalIdentityConfig,
+ fxaInternal
+ );
+
+ Assert.equal(syncAuthManager._fxaService._internal.now(), now);
+
+ let request = new Resource("https://example.net/i/like/pie/");
+ let authenticator = syncAuthManager.getResourceAuthenticator();
+ let output = await authenticator(request, "GET");
+ dump("output" + JSON.stringify(output));
+ let authHeader = output.headers.authorization;
+ Assert.ok(authHeader.startsWith("Hawk"));
+
+ // Skew correction is applied in the header and we're within the two-minute
+ // window.
+ Assert.equal(getTimestamp(authHeader), now - 12 * HOUR_MS);
+ Assert.ok(getTimestampDelta(authHeader, now) - 12 * HOUR_MS < 2 * MINUTE_MS);
+});
+
+add_task(async function test_ensureLoggedIn() {
+ configureFxAccountIdentity(globalSyncAuthManager);
+ await globalSyncAuthManager._ensureValidToken();
+ Assert.equal(Status.login, LOGIN_SUCCEEDED, "original initialize worked");
+ Assert.ok(globalSyncAuthManager._token);
+
+ // arrange for no logged in user.
+ let fxa = globalSyncAuthManager._fxaService;
+ let signedInUser =
+ fxa._internal.currentAccountState.storageManager.accountData;
+ fxa._internal.currentAccountState.storageManager.accountData = null;
+ await Assert.rejects(
+ globalSyncAuthManager._ensureValidToken(true),
+ /no user is logged in/,
+ "expecting rejection due to no user"
+ );
+ // Restore the logged in user to what it was.
+ fxa._internal.currentAccountState.storageManager.accountData = signedInUser;
+ Status.login = LOGIN_FAILED_LOGIN_REJECTED;
+ await globalSyncAuthManager._ensureValidToken(true);
+ Assert.equal(Status.login, LOGIN_SUCCEEDED, "final ensureLoggedIn worked");
+});
+
+add_task(async function test_syncState() {
+ // Avoid polling for an unverified user.
+ let identityConfig = makeIdentityConfig();
+ let fxaInternal = makeFxAccountsInternalMock(identityConfig);
+ fxaInternal.startVerifiedCheck = () => {};
+ configureFxAccountIdentity(
+ globalSyncAuthManager,
+ globalIdentityConfig,
+ fxaInternal
+ );
+
+ // arrange for no logged in user.
+ let fxa = globalSyncAuthManager._fxaService;
+ let signedInUser =
+ fxa._internal.currentAccountState.storageManager.accountData;
+ fxa._internal.currentAccountState.storageManager.accountData = null;
+ await Assert.rejects(
+ globalSyncAuthManager._ensureValidToken(true),
+ /no user is logged in/,
+ "expecting rejection due to no user"
+ );
+ // Restore to an unverified user.
+ Services.prefs.setStringPref("services.sync.username", signedInUser.email);
+ signedInUser.verified = false;
+ fxa._internal.currentAccountState.storageManager.accountData = signedInUser;
+ Status.login = LOGIN_FAILED_LOGIN_REJECTED;
+ // The sync_auth observers are async, so call them directly.
+ await globalSyncAuthManager.observe(null, ONLOGIN_NOTIFICATION, "");
+ Assert.equal(
+ Status.login,
+ LOGIN_FAILED_LOGIN_REJECTED,
+ "should not have changed the login state for an unverified user"
+ );
+
+  // Now pretend the user became verified.
+ signedInUser.verified = true;
+ await globalSyncAuthManager.observe(null, ONVERIFIED_NOTIFICATION, "");
+ Assert.equal(
+ Status.login,
+ LOGIN_SUCCEEDED,
+ "should have changed the login state to success"
+ );
+});
+
+add_task(async function test_tokenExpiration() {
+ _("SyncAuthManager notices token expiration:");
+ let bimExp = new SyncAuthManager();
+ configureFxAccountIdentity(bimExp, globalIdentityConfig);
+
+ let authenticator = bimExp.getResourceAuthenticator();
+ Assert.ok(!!authenticator);
+ let req = {
+ uri: CommonUtils.makeURI("https://example.net/somewhere/over/the/rainbow"),
+ method: "GET",
+ };
+ await authenticator(req, "GET");
+
+ // Mock the clock.
+ _("Forcing the token to expire ...");
+ Object.defineProperty(bimExp, "_now", {
+ value: function customNow() {
+ return Date.now() + 3000001;
+ },
+ writable: true,
+ });
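+  // Advancing the clock by ~50 minutes should be well past the mock token's
+  // expiration; the assertion below verifies that directly.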
+ Assert.ok(bimExp._token.expiration < bimExp._now());
+ _("... means SyncAuthManager knows to re-fetch it on the next call.");
+ Assert.ok(!bimExp._hasValidToken());
+});
+
+add_task(async function test_getTokenErrors() {
+ _("SyncAuthManager correctly handles various failures to get a token.");
+
+ _("Arrange for a 401 - Sync should reflect an auth error.");
+ initializeIdentityWithTokenServerResponse({
+ status: 401,
+ headers: { "content-type": "application/json" },
+ body: JSON.stringify({}),
+ });
+ let syncAuthManager = Service.identity;
+
+ await Assert.rejects(
+ syncAuthManager._ensureValidToken(),
+ AuthenticationError,
+ "should reject due to 401"
+ );
+ Assert.equal(Status.login, LOGIN_FAILED_LOGIN_REJECTED, "login was rejected");
+
+ // XXX - other interesting responses to return?
+
+ // And for good measure, some totally "unexpected" errors - we generally
+ // assume these problems are going to magically go away at some point.
+ _(
+ "Arrange for an empty body with a 200 response - should reflect a network error."
+ );
+ initializeIdentityWithTokenServerResponse({
+ status: 200,
+ headers: [],
+ body: "",
+ });
+ syncAuthManager = Service.identity;
+ await Assert.rejects(
+ syncAuthManager._ensureValidToken(),
+ TokenServerClientServerError,
+ "should reject due to non-JSON response"
+ );
+ Assert.equal(
+ Status.login,
+ LOGIN_FAILED_NETWORK_ERROR,
+ "login state is LOGIN_FAILED_NETWORK_ERROR"
+ );
+});
+
+add_task(async function test_refreshAccessTokenOn401() {
+ _("SyncAuthManager refreshes the FXA OAuth access token after a 401.");
+ var identityConfig = makeIdentityConfig();
+ var syncAuthManager = new SyncAuthManager();
+ // Use the real `getOAuthToken` method that calls
+ // `mockFxAClient.accessTokenWithSessionToken`.
+ let fxaInternal = makeFxAccountsInternalMock(identityConfig);
+ delete fxaInternal.getOAuthToken;
+ configureFxAccountIdentity(syncAuthManager, identityConfig, fxaInternal);
+ syncAuthManager._fxaService._internal.initialize();
+
+ let getTokenCount = 0;
+
+ let CheckSignMockFxAClient = function () {
+ FxAccountsClient.apply(this);
+ };
+ CheckSignMockFxAClient.prototype = {
+ accessTokenWithSessionToken() {
+ ++getTokenCount;
+ return Promise.resolve({ access_token: "token" });
+ },
+ };
+ Object.setPrototypeOf(
+ CheckSignMockFxAClient.prototype,
+ FxAccountsClient.prototype
+ );
+
+ let mockFxAClient = new CheckSignMockFxAClient();
+ syncAuthManager._fxaService._internal._fxAccountsClient = mockFxAClient;
+
+ let didReturn401 = false;
+ let didReturn200 = false;
+ let mockTSC = mockTokenServer(() => {
+ if (getTokenCount <= 1) {
+ didReturn401 = true;
+ return {
+ status: 401,
+ headers: { "content-type": "application/json" },
+ body: JSON.stringify({}),
+ };
+ }
+ didReturn200 = true;
+ return {
+ status: 200,
+ headers: { "content-type": "application/json" },
+ body: JSON.stringify({
+ id: "id",
+ key: "key",
+ api_endpoint: "http://example.com/",
+ uid: "uid",
+ duration: 300,
+ }),
+ };
+ });
+
+ syncAuthManager._tokenServerClient = mockTSC;
+
+ await syncAuthManager._ensureValidToken();
+
+ Assert.equal(getTokenCount, 2);
+ Assert.ok(didReturn401);
+ Assert.ok(didReturn200);
+ Assert.ok(syncAuthManager._token);
+ Assert.ok(syncAuthManager._hasValidToken());
+});
+
+add_task(async function test_getTokenErrorWithRetry() {
+ _("tokenserver sends an observer notification on various backoff headers.");
+
+ // Set Sync's backoffInterval to zero - after we simulated the backoff header
+ // it should reflect the value we sent.
+ Status.backoffInterval = 0;
+ _("Arrange for a 503 with a Retry-After header.");
+ initializeIdentityWithTokenServerResponse({
+ status: 503,
+ headers: { "content-type": "application/json", "retry-after": "100" },
+ body: JSON.stringify({}),
+ });
+ let syncAuthManager = Service.identity;
+
+ await Assert.rejects(
+ syncAuthManager._ensureValidToken(),
+ TokenServerClientServerError,
+ "should reject due to 503"
+ );
+
+ // The observer should have fired - check it got the value in the response.
+ Assert.equal(Status.login, LOGIN_FAILED_NETWORK_ERROR, "login was rejected");
+ // Sync will have the value in ms with some slop - so check it is at least that.
+ Assert.ok(Status.backoffInterval >= 100000);
+
+ _("Arrange for a 200 with an X-Backoff header.");
+ Status.backoffInterval = 0;
+ initializeIdentityWithTokenServerResponse({
+ status: 503,
+ headers: { "content-type": "application/json", "x-backoff": "200" },
+ body: JSON.stringify({}),
+ });
+ syncAuthManager = Service.identity;
+
+ await Assert.rejects(
+ syncAuthManager._ensureValidToken(),
+ TokenServerClientServerError,
+ "should reject due to no token in response"
+ );
+
+ // The observer should have fired - check it got the value in the response.
+ Assert.ok(Status.backoffInterval >= 200000);
+});
+
+add_task(async function test_getKeysErrorWithBackoff() {
+ _(
+ "Auth server (via hawk) sends an observer notification on backoff headers."
+ );
+
+ // Set Sync's backoffInterval to zero - after we simulated the backoff header
+ // it should reflect the value we sent.
+ Status.backoffInterval = 0;
+ _("Arrange for a 503 with a X-Backoff header.");
+
+ let config = makeIdentityConfig();
+ // We want no scopedKeys so we attempt to fetch them.
+ delete config.fxaccount.user.scopedKeys;
+ config.fxaccount.user.keyFetchToken = "keyfetchtoken";
+ await initializeIdentityWithHAWKResponseFactory(
+ config,
+ function (method, data, uri) {
+ Assert.equal(method, "get");
+ Assert.equal(uri, "http://mockedserver:9999/account/keys");
+ return {
+ status: 503,
+ headers: { "content-type": "application/json", "x-backoff": "100" },
+ body: "{}",
+ };
+ }
+ );
+
+ let syncAuthManager = Service.identity;
+ await Assert.rejects(
+ syncAuthManager._ensureValidToken(),
+ TokenServerClientServerError,
+ "should reject due to 503"
+ );
+
+ // The observer should have fired - check it got the value in the response.
+ Assert.equal(Status.login, LOGIN_FAILED_NETWORK_ERROR, "login was rejected");
+ // Sync will have the value in ms with some slop - so check it is at least that.
+ Assert.ok(Status.backoffInterval >= 100000);
+});
+
+add_task(async function test_getKeysErrorWithRetry() {
+ _("Auth server (via hawk) sends an observer notification on retry headers.");
+
+ // Set Sync's backoffInterval to zero - after we simulated the backoff header
+ // it should reflect the value we sent.
+ Status.backoffInterval = 0;
+ _("Arrange for a 503 with a Retry-After header.");
+
+ let config = makeIdentityConfig();
+ // We want no scopedKeys so we attempt to fetch them.
+ delete config.fxaccount.user.scopedKeys;
+ config.fxaccount.user.keyFetchToken = "keyfetchtoken";
+ await initializeIdentityWithHAWKResponseFactory(
+ config,
+ function (method, data, uri) {
+ Assert.equal(method, "get");
+ Assert.equal(uri, "http://mockedserver:9999/account/keys");
+ return {
+ status: 503,
+ headers: { "content-type": "application/json", "retry-after": "100" },
+ body: "{}",
+ };
+ }
+ );
+
+ let syncAuthManager = Service.identity;
+ await Assert.rejects(
+ syncAuthManager._ensureValidToken(),
+ TokenServerClientServerError,
+ "should reject due to 503"
+ );
+
+ // The observer should have fired - check it got the value in the response.
+ Assert.equal(Status.login, LOGIN_FAILED_NETWORK_ERROR, "login was rejected");
+ // Sync will have the value in ms with some slop - so check it is at least that.
+ Assert.ok(Status.backoffInterval >= 100000);
+});
+
+add_task(async function test_getHAWKErrors() {
+ _("SyncAuthManager correctly handles various HAWK failures.");
+
+ _("Arrange for a 401 - Sync should reflect an auth error.");
+ let config = makeIdentityConfig();
+ await initializeIdentityWithHAWKResponseFactory(
+ config,
+ function (method, data, uri) {
+ if (uri == "http://mockedserver:9999/oauth/token") {
+ Assert.equal(method, "post");
+ return {
+ status: 401,
+ headers: { "content-type": "application/json" },
+ body: JSON.stringify({
+ code: 401,
+ errno: 110,
+ error: "invalid token",
+ }),
+ };
+ }
+ // For any follow-up requests that check account status.
+ return {
+ status: 200,
+ headers: { "content-type": "application/json" },
+ body: JSON.stringify({}),
+ };
+ }
+ );
+ Assert.equal(Status.login, LOGIN_FAILED_LOGIN_REJECTED, "login was rejected");
+
+ // XXX - other interesting responses to return?
+
+ // And for good measure, some totally "unexpected" errors - we generally
+ // assume these problems are going to magically go away at some point.
+ _(
+ "Arrange for an empty body with a 200 response - should reflect a network error."
+ );
+ await initializeIdentityWithHAWKResponseFactory(
+ config,
+ function (method, data, uri) {
+ Assert.equal(method, "post");
+ Assert.equal(uri, "http://mockedserver:9999/oauth/token");
+ return {
+ status: 200,
+ headers: [],
+ body: "",
+ };
+ }
+ );
+ Assert.equal(
+ Status.login,
+ LOGIN_FAILED_NETWORK_ERROR,
+ "login state is LOGIN_FAILED_NETWORK_ERROR"
+ );
+});
+
+add_task(async function test_getKeysFailing401() {
+ _("SyncAuthManager correctly handles 401 responses fetching keys.");
+
+ _("Arrange for a 401 - Sync should reflect an auth error.");
+ let config = makeIdentityConfig();
+ // We want no scopedKeys so we attempt to fetch them.
+ delete config.fxaccount.user.scopedKeys;
+ config.fxaccount.user.keyFetchToken = "keyfetchtoken";
+ await initializeIdentityWithHAWKResponseFactory(
+ config,
+ function (method, data, uri) {
+ Assert.equal(method, "get");
+ Assert.equal(uri, "http://mockedserver:9999/account/keys");
+ return {
+ status: 401,
+ headers: { "content-type": "application/json" },
+ body: "{}",
+ };
+ }
+ );
+ Assert.equal(Status.login, LOGIN_FAILED_LOGIN_REJECTED, "login was rejected");
+});
+
+add_task(async function test_getKeysFailing503() {
+ _("SyncAuthManager correctly handles 5XX responses fetching keys.");
+
+ _("Arrange for a 503 - Sync should reflect a network error.");
+ let config = makeIdentityConfig();
+ // We want no scopedKeys so we attempt to fetch them.
+ delete config.fxaccount.user.scopedKeys;
+ config.fxaccount.user.keyFetchToken = "keyfetchtoken";
+ await initializeIdentityWithHAWKResponseFactory(
+ config,
+ function (method, data, uri) {
+ Assert.equal(method, "get");
+ Assert.equal(uri, "http://mockedserver:9999/account/keys");
+ return {
+ status: 503,
+ headers: { "content-type": "application/json" },
+ body: "{}",
+ };
+ }
+ );
+ Assert.equal(
+ Status.login,
+ LOGIN_FAILED_NETWORK_ERROR,
+ "state reflects network error"
+ );
+});
+
+add_task(async function test_getKeysMissing() {
+ _(
+ "SyncAuthManager correctly handles getKeyForScope succeeding but not returning the key."
+ );
+
+ let syncAuthManager = new SyncAuthManager();
+ let identityConfig = makeIdentityConfig();
+  // Our mock identity config already has scopedKeys - remove them or we never
+  // try to fetch them.
+ delete identityConfig.fxaccount.user.scopedKeys;
+ identityConfig.fxaccount.user.keyFetchToken = "keyFetchToken";
+
+ configureFxAccountIdentity(syncAuthManager, identityConfig);
+
+ // Mock a fxAccounts object
+ let fxa = new FxAccounts({
+ fxAccountsClient: new MockFxAccountsClient(),
+ newAccountState(credentials) {
+ // We only expect this to be called with null indicating the (mock)
+ // storage should be read.
+ if (credentials) {
+ throw new Error("Not expecting to have credentials passed");
+ }
+ let storageManager = new MockFxaStorageManager();
+ storageManager.initialize(identityConfig.fxaccount.user);
+ return new AccountState(storageManager);
+ },
+ // And the keys object with a mock that returns no keys.
+ keys: {
+ getKeyForScope() {
+ return Promise.resolve(null);
+ },
+ },
+ });
+
+ syncAuthManager._fxaService = fxa;
+
+ await Assert.rejects(
+ syncAuthManager._ensureValidToken(),
+ /browser does not have the sync key, cannot sync/
+ );
+});
+
+add_task(async function test_getKeysUnexpectedError() {
+ _(
+ "SyncAuthManager correctly handles getKeyForScope throwing an unexpected error."
+ );
+
+ let syncAuthManager = new SyncAuthManager();
+ let identityConfig = makeIdentityConfig();
+  // Our mock identity config already has scopedKeys - remove them or we never
+  // try to fetch them.
+ delete identityConfig.fxaccount.user.scopedKeys;
+ identityConfig.fxaccount.user.keyFetchToken = "keyFetchToken";
+
+ configureFxAccountIdentity(syncAuthManager, identityConfig);
+
+ // Mock a fxAccounts object
+ let fxa = new FxAccounts({
+ fxAccountsClient: new MockFxAccountsClient(),
+ newAccountState(credentials) {
+ // We only expect this to be called with null indicating the (mock)
+ // storage should be read.
+ if (credentials) {
+ throw new Error("Not expecting to have credentials passed");
+ }
+ let storageManager = new MockFxaStorageManager();
+ storageManager.initialize(identityConfig.fxaccount.user);
+ return new AccountState(storageManager);
+ },
+    // And the keys object with a mock that throws an unexpected error.
+ keys: {
+ async getKeyForScope() {
+ throw new Error("well that was unexpected");
+ },
+ },
+ });
+
+ syncAuthManager._fxaService = fxa;
+
+ await Assert.rejects(
+ syncAuthManager._ensureValidToken(),
+ /well that was unexpected/
+ );
+});
+
+add_task(async function test_signedInUserMissing() {
+ _(
+ "SyncAuthManager detects getSignedInUser returning incomplete account data"
+ );
+
+ let syncAuthManager = new SyncAuthManager();
+ // Delete stored keys and the key fetch token.
+ delete globalIdentityConfig.fxaccount.user.scopedKeys;
+ delete globalIdentityConfig.fxaccount.user.keyFetchToken;
+
+ configureFxAccountIdentity(syncAuthManager, globalIdentityConfig);
+
+ let fxa = new FxAccounts({
+ fetchAndUnwrapKeys() {
+ return Promise.resolve({});
+ },
+ fxAccountsClient: new MockFxAccountsClient(),
+ newAccountState(credentials) {
+ // We only expect this to be called with null indicating the (mock)
+ // storage should be read.
+ if (credentials) {
+ throw new Error("Not expecting to have credentials passed");
+ }
+ let storageManager = new MockFxaStorageManager();
+ storageManager.initialize(globalIdentityConfig.fxaccount.user);
+ return new AccountState(storageManager);
+ },
+ });
+
+ syncAuthManager._fxaService = fxa;
+
+ let status = await syncAuthManager.unlockAndVerifyAuthState();
+ Assert.equal(status, LOGIN_FAILED_LOGIN_REJECTED);
+});
+
+// End of tests
+// Utility functions follow
+
+// Create a new sync_auth object and initialize it with a
+// hawk mock that simulates HTTP responses.
+// The callback function will be called each time the mocked hawk server wants
+// to make a request. The result of the callback should be the mock response
+// object that will be returned to hawk.
+// A token server mock will be used that doesn't hit a server, so we move
+// directly to a hawk request.
+async function initializeIdentityWithHAWKResponseFactory(
+ config,
+ cbGetResponse
+) {
+ // A mock request object.
+ function MockRESTRequest(uri, credentials, extra) {
+ this._uri = uri;
+ this._credentials = credentials;
+ this._extra = extra;
+ }
+ MockRESTRequest.prototype = {
+ setHeader() {},
+ async post(data) {
+ this.response = cbGetResponse(
+ "post",
+ data,
+ this._uri,
+ this._credentials,
+ this._extra
+ );
+ return this.response;
+ },
+ async get() {
+ // Skip /status requests (sync_auth checks if the account still
+ // exists after an auth error)
+ if (this._uri.startsWith("http://mockedserver:9999/account/status")) {
+ this.response = {
+ status: 200,
+ headers: { "content-type": "application/json" },
+ body: JSON.stringify({ exists: true }),
+ };
+ } else {
+ this.response = cbGetResponse(
+ "get",
+ null,
+ this._uri,
+ this._credentials,
+ this._extra
+ );
+ }
+ return this.response;
+ },
+ };
+
+ // The hawk client.
+ function MockedHawkClient() {}
+ MockedHawkClient.prototype = new HawkClient("http://mockedserver:9999");
+ MockedHawkClient.prototype.constructor = MockedHawkClient;
+ MockedHawkClient.prototype.newHAWKAuthenticatedRESTRequest = function (
+ uri,
+ credentials,
+ extra
+ ) {
+ return new MockRESTRequest(uri, credentials, extra);
+ };
+ // Arrange for the same observerPrefix as FxAccountsClient uses
+ MockedHawkClient.prototype.observerPrefix = "FxA:hawk";
+
+ // tie it all together - configureFxAccountIdentity isn't useful here :(
+ let fxaClient = new MockFxAccountsClient();
+ fxaClient.hawk = new MockedHawkClient();
+ let internal = {
+ fxAccountsClient: fxaClient,
+ newAccountState(credentials) {
+ // We only expect this to be called with null indicating the (mock)
+ // storage should be read.
+ if (credentials) {
+ throw new Error("Not expecting to have credentials passed");
+ }
+ let storageManager = new MockFxaStorageManager();
+ storageManager.initialize(config.fxaccount.user);
+ return new AccountState(storageManager);
+ },
+ };
+ let fxa = new FxAccounts(internal);
+
+ globalSyncAuthManager._fxaService = fxa;
+ await Assert.rejects(
+ globalSyncAuthManager._ensureValidToken(true),
+ // TODO: Ideally this should have a specific check for an error.
+ () => true,
+ "expecting rejection due to hawk error"
+ );
+}
+
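+// Parse the ts="..." value out of a Hawk Authorization header and convert it
+// from seconds to milliseconds.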
+function getTimestamp(hawkAuthHeader) {
+ return parseInt(/ts="(\d+)"/.exec(hawkAuthHeader)[1], 10) * SECOND_MS;
+}
+
+function getTimestampDelta(hawkAuthHeader, now = Date.now()) {
+ return Math.abs(getTimestamp(hawkAuthHeader) - now);
+}
+
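+// Build a TokenServerClient whose REST requests are answered by `func`,
+// which returns the mock response object for each call.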
+function mockTokenServer(func) {
+ let requestLog = Log.repository.getLogger("testing.mock-rest");
+ if (!requestLog.appenders.length) {
+ // might as well see what it says :)
+ requestLog.addAppender(new Log.DumpAppender());
+ requestLog.level = Log.Level.Trace;
+ }
+ function MockRESTRequest(url) {}
+ MockRESTRequest.prototype = {
+ _log: requestLog,
+ setHeader() {},
+ async get() {
+ this.response = func();
+ return this.response;
+ },
+ };
+ // The mocked TokenServer client which will get the response.
+ function MockTSC() {}
+ MockTSC.prototype = new TokenServerClient();
+ MockTSC.prototype.constructor = MockTSC;
+ MockTSC.prototype.newRESTRequest = function (url) {
+ return new MockRESTRequest(url);
+ };
+ // Arrange for the same observerPrefix as sync_auth uses.
+ MockTSC.prototype.observerPrefix = "weave:service";
+ return new MockTSC();
+}
diff --git a/services/sync/tests/unit/test_syncedtabs.js b/services/sync/tests/unit/test_syncedtabs.js
new file mode 100644
index 0000000000..79ab3e0686
--- /dev/null
+++ b/services/sync/tests/unit/test_syncedtabs.js
@@ -0,0 +1,342 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*-
+ * vim:set ts=2 sw=2 sts=2 et:
+ */
+"use strict";
+
+const { Weave } = ChromeUtils.importESModule(
+ "resource://services-sync/main.sys.mjs"
+);
+const { SyncedTabs } = ChromeUtils.importESModule(
+ "resource://services-sync/SyncedTabs.sys.mjs"
+);
+
+Log.repository.getLogger("Sync.RemoteTabs").addAppender(new Log.DumpAppender());
+
+// A mock "Tabs" engine which the SyncedTabs module will use instead of the real
+// engine. We pass a constructor that Sync creates.
+function MockTabsEngine() {
+ this.clients = {}; // We'll set this dynamically
+ // Mock fxAccounts + recentDeviceList as if we hit the FxA server
+ this.fxAccounts = {
+ device: {
+ recentDeviceList: [
+ {
+ id: 1,
+ name: "updated desktop name",
+ availableCommands: {
+ "https://identity.mozilla.com/cmd/open-uri": "baz",
+ },
+ },
+ {
+ id: 2,
+ name: "updated mobile name",
+ availableCommands: {
+ "https://identity.mozilla.com/cmd/open-uri": "boo",
+ },
+ },
+ ],
+ },
+ };
+}
+
+MockTabsEngine.prototype = {
+ name: "tabs",
+ enabled: true,
+
+ getAllClients() {
+ return Object.values(this.clients);
+ },
+
+ getOpenURLs() {
+ return new Set();
+ },
+};
+
+let tabsEngine;
+
+// A clients engine that doesn't need to be a constructor.
+let MockClientsEngine = {
+ clientSettings: null, // Set in `configureClients`.
+
+ isMobile(guid) {
+ if (!guid.endsWith("desktop") && !guid.endsWith("mobile")) {
+ throw new Error(
+ "this module expected guids to end with 'desktop' or 'mobile'"
+ );
+ }
+ return guid.endsWith("mobile");
+ },
+ remoteClientExists(id) {
+ return this.clientSettings[id] !== false;
+ },
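+  // Name resolution order: an explicit clientSettings override, then the
+  // (fresher) FxA device name, and finally the name in the tabs record.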
+ getClientName(id) {
+ if (this.clientSettings[id]) {
+ return this.clientSettings[id];
+ }
+ let client = tabsEngine.clients[id];
+ let fxaDevice = tabsEngine.fxAccounts.device.recentDeviceList.find(
+ device => device.id === client.fxaDeviceId
+ );
+ return fxaDevice ? fxaDevice.name : client.clientName;
+ },
+
+ getClientFxaDeviceId(id) {
+ if (this.clientSettings[id]) {
+ return this.clientSettings[id];
+ }
+ return tabsEngine.clients[id].fxaDeviceId;
+ },
+
+ getClientType(id) {
+ return "desktop";
+ },
+};
+
+function configureClients(clients, clientSettings = {}) {
+ // each client record is expected to have an id.
+ for (let [guid, client] of Object.entries(clients)) {
+ client.id = guid;
+ }
+ tabsEngine.clients = clients;
+ // Apply clients collection overrides.
+ MockClientsEngine.clientSettings = clientSettings;
+ // Send an observer that pretends the engine just finished a sync.
+ Services.obs.notifyObservers(null, "weave:engine:sync:finish", "tabs");
+}
+
+add_task(async function setup() {
+ await Weave.Service.promiseInitialized;
+ // Configure Sync with our mock tabs engine and force it to become initialized.
+ await Weave.Service.engineManager.unregister("tabs");
+ await Weave.Service.engineManager.register(MockTabsEngine);
+ Weave.Service.clientsEngine = MockClientsEngine;
+ tabsEngine = Weave.Service.engineManager.get("tabs");
+
+ // Tell the Sync XPCOM service it is initialized.
+ let weaveXPCService = Cc["@mozilla.org/weave/service;1"].getService(
+ Ci.nsISupports
+ ).wrappedJSObject;
+ weaveXPCService.ready = true;
+});
+
+// The tests.
+add_task(async function test_noClients() {
+  // No clients means there can be no tabs.
+ await configureClients({});
+
+ let tabs = await SyncedTabs.getTabClients();
+ equal(Object.keys(tabs).length, 0);
+});
+
+add_task(async function test_clientWithTabs() {
+ await configureClients({
+ guid_desktop: {
+ clientName: "My Desktop",
+ tabs: [
+ {
+ urlHistory: ["http://foo.com/"],
+ icon: "http://foo.com/favicon",
+ lastUsed: 1655745700, // Mon, 20 Jun 2022 17:21:40 GMT
+ },
+ ],
+ },
+ guid_mobile: {
+ clientName: "My Phone",
+ tabs: [],
+ },
+ });
+
+ let clients = await SyncedTabs.getTabClients();
+ equal(clients.length, 2);
+ clients.sort((a, b) => {
+ return a.name.localeCompare(b.name);
+ });
+ equal(clients[0].tabs.length, 1);
+ equal(clients[0].tabs[0].url, "http://foo.com/");
+ equal(clients[0].tabs[0].icon, "http://foo.com/favicon");
+ equal(clients[0].tabs[0].lastUsed, 1655745700);
+ // second client has no tabs.
+ equal(clients[1].tabs.length, 0);
+});
+
+add_task(async function test_staleClientWithTabs() {
+ await configureClients(
+ {
+ guid_desktop: {
+ clientName: "My Desktop",
+ tabs: [
+ {
+ urlHistory: ["http://foo.com/"],
+ icon: "http://foo.com/favicon",
+ lastUsed: 1655745750,
+ },
+ ],
+ },
+ guid_mobile: {
+ clientName: "My Phone",
+ tabs: [],
+ },
+ guid_stale_mobile: {
+ clientName: "My Deleted Phone",
+ tabs: [],
+ },
+ guid_stale_desktop: {
+ clientName: "My Deleted Laptop",
+ tabs: [
+ {
+ urlHistory: ["https://bar.com/"],
+ icon: "https://bar.com/favicon",
+ lastUsed: 1655745700,
+ },
+ ],
+ },
+ guid_stale_name_desktop: {
+ clientName: "My Generic Device",
+ tabs: [
+ {
+ urlHistory: ["https://example.edu/"],
+ icon: "https://example.edu/favicon",
+ lastUsed: 1655745800,
+ },
+ ],
+ },
+ },
+ {
+ guid_stale_mobile: false,
+ guid_stale_desktop: false,
+ // We should always use the device name from the clients collection, instead
+ // of the possibly stale tabs collection.
+ guid_stale_name_desktop: "My Laptop",
+ }
+ );
+ let clients = await SyncedTabs.getTabClients();
+ clients.sort((a, b) => {
+ return a.name.localeCompare(b.name);
+ });
+ equal(clients.length, 3);
+ equal(clients[0].name, "My Desktop");
+ equal(clients[0].tabs.length, 1);
+ equal(clients[0].tabs[0].url, "http://foo.com/");
+ equal(clients[0].tabs[0].lastUsed, 1655745750);
+ equal(clients[1].name, "My Laptop");
+ equal(clients[1].tabs.length, 1);
+ equal(clients[1].tabs[0].url, "https://example.edu/");
+ equal(clients[1].tabs[0].lastUsed, 1655745800);
+ equal(clients[2].name, "My Phone");
+ equal(clients[2].tabs.length, 0);
+});
+
+add_task(async function test_clientWithTabsIconsDisabled() {
+ Services.prefs.setBoolPref("services.sync.syncedTabs.showRemoteIcons", false);
+ await configureClients({
+ guid_desktop: {
+ clientName: "My Desktop",
+ tabs: [
+ {
+ urlHistory: ["http://foo.com/"],
+ icon: "http://foo.com/favicon",
+ },
+ ],
+ },
+ });
+
+ let clients = await SyncedTabs.getTabClients();
+ equal(clients.length, 1);
+ clients.sort((a, b) => {
+ return a.name.localeCompare(b.name);
+ });
+ equal(clients[0].tabs.length, 1);
+ equal(clients[0].tabs[0].url, "http://foo.com/");
+ // Expect the default favicon due to the pref being false.
+ equal(clients[0].tabs[0].icon, "page-icon:http://foo.com/");
+ Services.prefs.clearUserPref("services.sync.syncedTabs.showRemoteIcons");
+});
+
+add_task(async function test_filter() {
+  // Two tabs on one client; filter first by URL, then by title.
+ await configureClients({
+ guid_desktop: {
+ clientName: "My Desktop",
+ tabs: [
+ {
+ urlHistory: ["http://foo.com/"],
+ title: "A test page.",
+ },
+ {
+ urlHistory: ["http://bar.com/"],
+ title: "Another page.",
+ },
+ ],
+ },
+ });
+
+ let clients = await SyncedTabs.getTabClients("foo");
+ equal(clients.length, 1);
+ equal(clients[0].tabs.length, 1);
+ equal(clients[0].tabs[0].url, "http://foo.com/");
+ // check it matches the title.
+ clients = await SyncedTabs.getTabClients("test");
+ equal(clients.length, 1);
+ equal(clients[0].tabs.length, 1);
+ equal(clients[0].tabs[0].url, "http://foo.com/");
+});
+
+add_task(async function test_duplicatesTabsAcrossClients() {
+ await configureClients({
+ guid_desktop: {
+ clientName: "My Desktop",
+ tabs: [
+ {
+ urlHistory: ["http://foo.com/"],
+ title: "A test page.",
+ },
+ ],
+ },
+ guid_mobile: {
+ clientName: "My Phone",
+ tabs: [
+ {
+ urlHistory: ["http://foo.com/"],
+ title: "A test page.",
+ },
+ ],
+ },
+ });
+
+ let clients = await SyncedTabs.getTabClients();
+ equal(clients.length, 2);
+ equal(clients[0].tabs.length, 1);
+ equal(clients[1].tabs.length, 1);
+ equal(clients[0].tabs[0].url, "http://foo.com/");
+ equal(clients[1].tabs[0].url, "http://foo.com/");
+});
+
+add_task(async function test_clientsTabUpdatedName() {
+ // See the "fxAccounts" object in the MockEngine above for the device list
+ await configureClients({
+ guid_desktop: {
+ clientName: "My Desktop",
+ tabs: [
+ {
+ urlHistory: ["http://foo.com/"],
+ icon: "http://foo.com/favicon",
+ },
+ ],
+ fxaDeviceId: 1,
+ },
+ guid_mobile: {
+ clientName: "My Phone",
+ tabs: [
+ {
+ urlHistory: ["http://bar.com/"],
+ icon: "http://bar.com/favicon",
+ },
+ ],
+ fxaDeviceId: 2,
+ },
+ });
+ let clients = await SyncedTabs.getTabClients();
+ equal(clients.length, 2);
+ equal(clients[0].name, "updated desktop name");
+ equal(clients[1].name, "updated mobile name");
+});
diff --git a/services/sync/tests/unit/test_syncengine.js b/services/sync/tests/unit/test_syncengine.js
new file mode 100644
index 0000000000..50995a4e40
--- /dev/null
+++ b/services/sync/tests/unit/test_syncengine.js
@@ -0,0 +1,302 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+async function makeSteamEngine() {
+ let engine = new SyncEngine("Steam", Service);
+ await engine.initialize();
+ return engine;
+}
+
+function guidSetOfSize(length) {
+ return new SerializableSet(Array.from({ length }, () => Utils.makeGUID()));
+}
+
+function assertSetsEqual(a, b) {
+ // Assert.deepEqual doesn't understand Set.
+ Assert.deepEqual(Array.from(a).sort(), Array.from(b).sort());
+}
+
+async function testSteamEngineStorage(test) {
+ try {
+ let setupEngine = await makeSteamEngine();
+
+ if (test.setup) {
+ await test.setup(setupEngine);
+ }
+
+ // Finalize the engine to flush the backlog and previously failed items to disk.
+ await setupEngine.finalize();
+
+ if (test.beforeCheck) {
+ await test.beforeCheck();
+ }
+
+ let checkEngine = await makeSteamEngine();
+ await test.check(checkEngine);
+
+ await checkEngine.resetClient();
+ await checkEngine.finalize();
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ }
+}
+
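+// Each storage test below is a spec object: arbitrary data properties (e.g.
+// toFetch) plus optional setup(engine), beforeCheck() and check(engine)
+// hooks, invoked in that order (with an engine.finalize() flush after setup)
+// and with `this` bound to the spec object.
+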
+let server;
+
+add_task(async function setup() {
+ server = httpd_setup({});
+});
+
+add_task(async function test_url_attributes() {
+ _("SyncEngine url attributes");
+ await SyncTestingInfrastructure(server);
+ Service.clusterURL = "https://cluster/1.1/foo/";
+ let engine = await makeSteamEngine();
+ try {
+ Assert.equal(engine.storageURL, "https://cluster/1.1/foo/storage/");
+ Assert.equal(engine.engineURL, "https://cluster/1.1/foo/storage/steam");
+ Assert.equal(engine.metaURL, "https://cluster/1.1/foo/storage/meta/global");
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ }
+});
+
+add_task(async function test_syncID() {
+ _("SyncEngine.syncID corresponds to preference");
+ await SyncTestingInfrastructure(server);
+ let engine = await makeSteamEngine();
+ try {
+ // Ensure pristine environment
+ Assert.equal(
+ Svc.PrefBranch.getPrefType("steam.syncID"),
+ Ci.nsIPrefBranch.PREF_INVALID
+ );
+ Assert.equal(await engine.getSyncID(), "");
+
+ // Resetting the local sync ID generates and stores a new GUID.
+ Assert.equal(await engine.resetLocalSyncID(), "fake-guid-00");
+ Assert.equal(Svc.PrefBranch.getStringPref("steam.syncID"), "fake-guid-00");
+
+ Svc.PrefBranch.setStringPref("steam.syncID", Utils.makeGUID());
+ Assert.equal(Svc.PrefBranch.getStringPref("steam.syncID"), "fake-guid-01");
+ Assert.equal(await engine.getSyncID(), "fake-guid-01");
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ }
+});
+
+add_task(async function test_lastSync() {
+ _("SyncEngine.lastSync corresponds to preferences");
+ await SyncTestingInfrastructure(server);
+ let engine = await makeSteamEngine();
+ try {
+ // Ensure pristine environment
+ Assert.equal(
+ Svc.PrefBranch.getPrefType("steam.lastSync"),
+ Ci.nsIPrefBranch.PREF_INVALID
+ );
+ Assert.equal(await engine.getLastSync(), 0);
+
+ // Floats are properly stored as floats and synced with the preference
+ await engine.setLastSync(123.45);
+ Assert.equal(await engine.getLastSync(), 123.45);
+ Assert.equal(Svc.PrefBranch.getStringPref("steam.lastSync"), "123.45");
+
+ // Integer is properly stored
+ await engine.setLastSync(67890);
+ Assert.equal(await engine.getLastSync(), 67890);
+ Assert.equal(Svc.PrefBranch.getStringPref("steam.lastSync"), "67890");
+
+ // resetLastSync() resets the value (and preference) to 0
+ await engine.resetLastSync();
+ Assert.equal(await engine.getLastSync(), 0);
+ Assert.equal(Svc.PrefBranch.getStringPref("steam.lastSync"), "0");
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ }
+});
+
+add_task(async function test_toFetch() {
+ _("SyncEngine.toFetch corresponds to file on disk");
+ await SyncTestingInfrastructure(server);
+
+ await testSteamEngineStorage({
+ toFetch: guidSetOfSize(3),
+ setup(engine) {
+ // Ensure pristine environment
+ Assert.equal(engine.toFetch.size, 0);
+
+ // Write file to disk
+ engine.toFetch = this.toFetch;
+ Assert.equal(engine.toFetch, this.toFetch);
+ },
+ check(engine) {
+ // toFetch is written asynchronously
+ assertSetsEqual(engine.toFetch, this.toFetch);
+ },
+ });
+
+ await testSteamEngineStorage({
+ toFetch: guidSetOfSize(4),
+ toFetch2: guidSetOfSize(5),
+ setup(engine) {
+ // Make sure it works for consecutive writes before the callback is executed.
+ engine.toFetch = this.toFetch;
+ Assert.equal(engine.toFetch, this.toFetch);
+
+ engine.toFetch = this.toFetch2;
+ Assert.equal(engine.toFetch, this.toFetch2);
+ },
+ check(engine) {
+ assertSetsEqual(engine.toFetch, this.toFetch2);
+ },
+ });
+
+ await testSteamEngineStorage({
+ toFetch: guidSetOfSize(2),
+ async beforeCheck() {
+ let toFetchPath = PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ "toFetch",
+ "steam.json"
+ );
+ await IOUtils.writeJSON(toFetchPath, this.toFetch, {
+ tmpPath: toFetchPath + ".tmp",
+ });
+ },
+ check(engine) {
+ // Read file from disk
+ assertSetsEqual(engine.toFetch, this.toFetch);
+ },
+ });
+});
+
+add_task(async function test_previousFailed() {
+ _("SyncEngine.previousFailed corresponds to file on disk");
+ await SyncTestingInfrastructure(server);
+
+ await testSteamEngineStorage({
+ previousFailed: guidSetOfSize(3),
+ setup(engine) {
+ // Ensure pristine environment
+ Assert.equal(engine.previousFailed.size, 0);
+
+ // Write file to disk
+ engine.previousFailed = this.previousFailed;
+ Assert.equal(engine.previousFailed, this.previousFailed);
+ },
+ check(engine) {
+ // previousFailed is written asynchronously
+ assertSetsEqual(engine.previousFailed, this.previousFailed);
+ },
+ });
+
+ await testSteamEngineStorage({
+ previousFailed: guidSetOfSize(4),
+ previousFailed2: guidSetOfSize(5),
+ setup(engine) {
+ // Make sure it works for consecutive writes before the callback is executed.
+ engine.previousFailed = this.previousFailed;
+ Assert.equal(engine.previousFailed, this.previousFailed);
+
+ engine.previousFailed = this.previousFailed2;
+ Assert.equal(engine.previousFailed, this.previousFailed2);
+ },
+ check(engine) {
+ assertSetsEqual(engine.previousFailed, this.previousFailed2);
+ },
+ });
+
+ await testSteamEngineStorage({
+ previousFailed: guidSetOfSize(2),
+ async beforeCheck() {
+ let previousFailedPath = PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ "failed",
+ "steam.json"
+ );
+ await IOUtils.writeJSON(previousFailedPath, this.previousFailed, {
+ tmpPath: previousFailedPath + ".tmp",
+ });
+ },
+ check(engine) {
+ // Read file from disk
+ assertSetsEqual(engine.previousFailed, this.previousFailed);
+ },
+ });
+});
+
+add_task(async function test_resetClient() {
+ _("SyncEngine.resetClient resets lastSync and toFetch");
+ await SyncTestingInfrastructure(server);
+ let engine = await makeSteamEngine();
+ try {
+ // Ensure pristine environment
+ Assert.equal(
+ Svc.PrefBranch.getPrefType("steam.lastSync"),
+ Ci.nsIPrefBranch.PREF_INVALID
+ );
+ Assert.equal(engine.toFetch.size, 0);
+
+ await engine.setLastSync(123.45);
+ engine.toFetch = guidSetOfSize(4);
+ engine.previousFailed = guidSetOfSize(3);
+
+ await engine.resetClient();
+ Assert.equal(await engine.getLastSync(), 0);
+ Assert.equal(engine.toFetch.size, 0);
+ Assert.equal(engine.previousFailed.size, 0);
+ } finally {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ }
+});
+
+add_task(async function test_wipeServer() {
+ _("SyncEngine.wipeServer deletes server data and resets the client.");
+ let engine = await makeSteamEngine();
+
+ const PAYLOAD = 42;
+ let steamCollection = new ServerWBO("steam", PAYLOAD);
+ let steamServer = httpd_setup({
+ "/1.1/foo/storage/steam": steamCollection.handler(),
+ });
+ await SyncTestingInfrastructure(steamServer);
+ do_test_pending();
+
+ try {
+ // Some data to reset.
+ await engine.setLastSync(123.45);
+ engine.toFetch = guidSetOfSize(3);
+
+ _("Wipe server data and reset client.");
+ await engine.wipeServer();
+ Assert.equal(steamCollection.payload, undefined);
+ Assert.equal(await engine.getLastSync(), 0);
+ Assert.equal(engine.toFetch.size, 0);
+ } finally {
+ steamServer.stop(do_test_finished);
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ }
+});
+
+add_task(async function finish() {
+ await promiseStopServer(server);
+});
diff --git a/services/sync/tests/unit/test_syncengine_sync.js b/services/sync/tests/unit/test_syncengine_sync.js
new file mode 100644
index 0000000000..efd061d5bc
--- /dev/null
+++ b/services/sync/tests/unit/test_syncengine_sync.js
@@ -0,0 +1,1781 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Weave } = ChromeUtils.importESModule(
+ "resource://services-sync/main.sys.mjs"
+);
+const { WBORecord } = ChromeUtils.importESModule(
+ "resource://services-sync/record.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { RotaryEngine } = ChromeUtils.importESModule(
+ "resource://testing-common/services/sync/rotaryengine.sys.mjs"
+);
+
+function makeRotaryEngine() {
+ return new RotaryEngine(Service);
+}
+
+async function clean(engine) {
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ Svc.PrefBranch.setStringPref("log.logger.engine.rotary", "Trace");
+ Service.recordManager.clearCache();
+ await engine._tracker.clearChangedIDs();
+ await engine.finalize();
+}
+
+async function cleanAndGo(engine, server) {
+ await clean(engine);
+ await promiseStopServer(server);
+}
+
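+// Currently identical to cleanAndGo; kept as a separately named helper.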
+async function promiseClean(engine, server) {
+ await clean(engine);
+ await promiseStopServer(server);
+}
+
+async function createServerAndConfigureClient() {
+ let engine = new RotaryEngine(Service);
+ let syncID = await engine.resetLocalSyncID();
+
+ let contents = {
+ meta: {
+ global: { engines: { rotary: { version: engine.version, syncID } } },
+ },
+ crypto: {},
+ rotary: {},
+ };
+
+ const USER = "foo";
+ let server = new SyncServer();
+ server.registerUser(USER, "password");
+ server.createContents(USER, contents);
+ server.start();
+
+ await SyncTestingInfrastructure(server, USER);
+ Service._updateCachedURLs();
+
+ return [engine, server, USER];
+}
+
+/*
+ * Tests
+ *
+ * SyncEngine._sync() is divided into four rather independent steps:
+ *
+ * - _syncStartup()
+ * - _processIncoming()
+ * - _uploadOutgoing()
+ * - _syncFinish()
+ *
+ * In the spirit of unit testing, these are tested individually for
+ * different scenarios below.
+ */
+
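+// For orientation, a rough sketch of how the four steps compose (assumed
+// shape only; the real SyncEngine._sync adds error handling and telemetry):
+//
+//   async _sync() {
+//     await this._syncStartup();     // check meta/global, reset if needed
+//     await this._processIncoming(); // download, reconcile, apply records
+//     await this._uploadOutgoing();  // upload tracked local changes
+//     await this._syncFinish();      // flush queued deletions, bookkeeping
+//   }
+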
+add_task(async function setup() {
+ await generateNewKeys(Service.collectionKeys);
+ Svc.PrefBranch.setStringPref("log.logger.engine.rotary", "Trace");
+});
+
+add_task(async function test_syncStartup_emptyOrOutdatedGlobalsResetsSync() {
+ _(
+ "SyncEngine._syncStartup resets sync and wipes server data if there's no or an outdated global record"
+ );
+
+ // Some server side data that's going to be wiped
+ let collection = new ServerCollection();
+ collection.insert(
+ "flying",
+ encryptPayload({ id: "flying", denomination: "LNER Class A3 4472" })
+ );
+ collection.insert(
+ "scotsman",
+ encryptPayload({ id: "scotsman", denomination: "Flying Scotsman" })
+ );
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ let engine = makeRotaryEngine();
+ engine._store.items = { rekolok: "Rekonstruktionslokomotive" };
+ try {
+ // Confirm initial environment
+ const changes = await engine._tracker.getChangedIDs();
+ Assert.equal(changes.rekolok, undefined);
+ let metaGlobal = await Service.recordManager.get(engine.metaURL);
+ Assert.equal(metaGlobal.payload.engines, undefined);
+ Assert.ok(!!collection.payload("flying"));
+ Assert.ok(!!collection.payload("scotsman"));
+
+ await engine.setLastSync(Date.now() / 1000);
+
+ // Trying to prompt a wipe -- we no longer track CryptoMeta per engine,
+ // so it has nothing to check.
+ await engine._syncStartup();
+
+ // The meta/global WBO has been filled with data about the engine
+ let engineData = metaGlobal.payload.engines.rotary;
+ Assert.equal(engineData.version, engine.version);
+ Assert.equal(engineData.syncID, await engine.getSyncID());
+
+ // Sync was reset and server data was wiped
+ Assert.equal(await engine.getLastSync(), 0);
+ Assert.equal(collection.payload("flying"), undefined);
+ Assert.equal(collection.payload("scotsman"), undefined);
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
+add_task(async function test_syncStartup_serverHasNewerVersion() {
+ _("SyncEngine._syncStartup ");
+
+ let global = new ServerWBO("global", {
+ engines: { rotary: { version: 23456 } },
+ });
+ let server = httpd_setup({
+ "/1.1/foo/storage/meta/global": global.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ let engine = makeRotaryEngine();
+ try {
+ // The server has a newer version of the data than our engine can
+ // handle. That should give us an exception.
+ let error;
+ try {
+ await engine._syncStartup();
+ } catch (ex) {
+ error = ex;
+ }
+ Assert.equal(error.failureCode, VERSION_OUT_OF_DATE);
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
+add_task(async function test_syncStartup_syncIDMismatchResetsClient() {
+ _("SyncEngine._syncStartup resets sync if syncIDs don't match");
+
+ let server = sync_httpd_setup({});
+
+ await SyncTestingInfrastructure(server);
+
+ // global record with a different syncID than our engine has
+ let engine = makeRotaryEngine();
+ let global = new ServerWBO("global", {
+ engines: { rotary: { version: engine.version, syncID: "foobar" } },
+ });
+ server.registerPathHandler("/1.1/foo/storage/meta/global", global.handler());
+
+ try {
+ // Confirm initial environment
+ Assert.equal(await engine.getSyncID(), "");
+ const changes = await engine._tracker.getChangedIDs();
+ Assert.equal(changes.rekolok, undefined);
+
+ await engine.setLastSync(Date.now() / 1000);
+ await engine._syncStartup();
+
+ // The engine has assumed the server's syncID
+ Assert.equal(await engine.getSyncID(), "foobar");
+
+ // Sync was reset
+ Assert.equal(await engine.getLastSync(), 0);
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
+add_task(async function test_processIncoming_emptyServer() {
+ _("SyncEngine._processIncoming working with an empty server backend");
+
+ let collection = new ServerCollection();
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ let engine = makeRotaryEngine();
+ try {
+ // Merely ensure that this code path is run without any errors
+ await engine._processIncoming();
+ Assert.equal(await engine.getLastSync(), 0);
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
+add_task(async function test_processIncoming_createFromServer() {
+ _("SyncEngine._processIncoming creates new records from server data");
+
+ // Some server records that will be downloaded
+ let collection = new ServerCollection();
+ collection.insert(
+ "flying",
+ encryptPayload({ id: "flying", denomination: "LNER Class A3 4472" })
+ );
+ collection.insert(
+ "scotsman",
+ encryptPayload({ id: "scotsman", denomination: "Flying Scotsman" })
+ );
+
+ // A pathological case involving a relative URI gone wrong.
+ let pathologicalPayload = encryptPayload({
+ id: "../pathological",
+ denomination: "Pathological Case",
+ });
+ collection.insert("../pathological", pathologicalPayload);
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ "/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
+ "/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ await generateNewKeys(Service.collectionKeys);
+
+ let engine = makeRotaryEngine();
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+
+ try {
+ // Confirm initial environment
+ Assert.equal(await engine.getLastSync(), 0);
+ Assert.equal(engine.lastModified, null);
+ Assert.equal(engine._store.items.flying, undefined);
+ Assert.equal(engine._store.items.scotsman, undefined);
+ Assert.equal(engine._store.items["../pathological"], undefined);
+
+ await engine._syncStartup();
+ await engine._processIncoming();
+
+ // Timestamps of last sync and last server modification are set.
+ Assert.ok((await engine.getLastSync()) > 0);
+ Assert.ok(engine.lastModified > 0);
+
+ // Local records have been created from the server data.
+ Assert.equal(engine._store.items.flying, "LNER Class A3 4472");
+ Assert.equal(engine._store.items.scotsman, "Flying Scotsman");
+ Assert.equal(engine._store.items["../pathological"], "Pathological Case");
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
+add_task(async function test_processIncoming_reconcile() {
+ _("SyncEngine._processIncoming updates local records");
+
+ let collection = new ServerCollection();
+
+ // This record doesn't exist on the client yet, so it will be
+ // created locally.
+ collection.insert(
+ "newrecord",
+ encryptPayload({ id: "newrecord", denomination: "New stuff..." })
+ );
+
+ // This server record is newer than the corresponding client one,
+ // so it'll update its data.
+ collection.insert(
+ "newerserver",
+ encryptPayload({ id: "newerserver", denomination: "New data!" })
+ );
+
+ // This server record is 2 mins older than the client counterpart
+ // but identical to it, so we're expecting the client record's
+ // changedID to be reset.
+ collection.insert(
+ "olderidentical",
+ encryptPayload({
+ id: "olderidentical",
+ denomination: "Older but identical",
+ })
+ );
+ collection._wbos.olderidentical.modified -= 120;
+
+ // This item simply has different data than the corresponding client
+ // record (which is unmodified), so it will update the client as well
+ collection.insert(
+ "updateclient",
+ encryptPayload({ id: "updateclient", denomination: "Get this!" })
+ );
+
+ // This is a dupe of 'original'.
+ collection.insert(
+ "duplication",
+ encryptPayload({ id: "duplication", denomination: "Original Entry" })
+ );
+
+ // This record is marked as deleted, so we're expecting the client
+ // record to be removed.
+ collection.insert(
+ "nukeme",
+ encryptPayload({ id: "nukeme", denomination: "Nuke me!", deleted: true })
+ );
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ let engine = makeRotaryEngine();
+ engine._store.items = {
+ newerserver: "New data, but not as new as server!",
+ olderidentical: "Older but identical",
+ updateclient: "Got data?",
+ original: "Original Entry",
+ long_original: "Long Original Entry",
+ nukeme: "Nuke me!",
+ };
+ // Make this record 1 min old, thus older than the one on the server
+ await engine._tracker.addChangedID("newerserver", Date.now() / 1000 - 60);
+ // This record was changed locally 2 mins after the server copy
+ await engine._tracker.addChangedID("olderidentical", Date.now() / 1000);
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+
+ try {
+ // Confirm initial environment
+ Assert.equal(engine._store.items.newrecord, undefined);
+ Assert.equal(
+ engine._store.items.newerserver,
+ "New data, but not as new as server!"
+ );
+ Assert.equal(engine._store.items.olderidentical, "Older but identical");
+ Assert.equal(engine._store.items.updateclient, "Got data?");
+ Assert.equal(engine._store.items.nukeme, "Nuke me!");
+ let changes = await engine._tracker.getChangedIDs();
+ Assert.ok(changes.olderidentical > 0);
+
+ await engine._syncStartup();
+ await engine._processIncoming();
+
+ // Timestamps of last sync and last server modification are set.
+ Assert.ok((await engine.getLastSync()) > 0);
+ Assert.ok(engine.lastModified > 0);
+
+ // The new record is created.
+ Assert.equal(engine._store.items.newrecord, "New stuff...");
+
+ // The 'newerserver' record is updated since the server data is newer.
+ Assert.equal(engine._store.items.newerserver, "New data!");
+
+ // The data for 'olderidentical' is identical on the server, so
+ // it's no longer marked as changed.
+ Assert.equal(engine._store.items.olderidentical, "Older but identical");
+ changes = await engine._tracker.getChangedIDs();
+ Assert.equal(changes.olderidentical, undefined);
+
+ // Updated with server data.
+ Assert.equal(engine._store.items.updateclient, "Get this!");
+
+ // The incoming ID is preferred.
+ Assert.equal(engine._store.items.original, undefined);
+ Assert.equal(engine._store.items.duplication, "Original Entry");
+ Assert.notEqual(engine._delete.ids.indexOf("original"), -1);
+
+ // The 'nukeme' record marked as deleted is removed.
+ Assert.equal(engine._store.items.nukeme, undefined);
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
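+// A rough sketch of the per-record rule these scenarios exercise (assumed
+// shape; the real _reconcile also handles dupes and deletions):
+//
+//   if (!locallyChanged(item.id)) return applyIncoming(item);   // updateclient
+//   if (remoteIsNewer(item)) return applyIncoming(item);        // newerserver
+//   if (identicalPayloads(item)) clearChangedID(item.id);       // olderidentical
+//   // otherwise keep the local change; it uploads on a later step
+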
+add_task(async function test_processIncoming_reconcile_local_deleted() {
+ _("Ensure local, duplicate ID is deleted on server.");
+
+ // When a duplicate is resolved, the local ID (which is never taken) should
+ // be deleted on the server.
+ let [engine, server, user] = await createServerAndConfigureClient();
+
+ let now = Date.now() / 1000 - 10;
+ await engine.setLastSync(now);
+ engine.lastModified = now + 1;
+
+ let record = encryptPayload({
+ id: "DUPE_INCOMING",
+ denomination: "incoming",
+ });
+ let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
+ server.insertWBO(user, "rotary", wbo);
+
+ record = encryptPayload({ id: "DUPE_LOCAL", denomination: "local" });
+ wbo = new ServerWBO("DUPE_LOCAL", record, now - 1);
+ server.insertWBO(user, "rotary", wbo);
+
+ await engine._store.create({ id: "DUPE_LOCAL", denomination: "local" });
+ Assert.ok(await engine._store.itemExists("DUPE_LOCAL"));
+ Assert.equal("DUPE_LOCAL", await engine._findDupe({ id: "DUPE_INCOMING" }));
+
+ await engine._sync();
+
+ do_check_attribute_count(engine._store.items, 1);
+ Assert.ok("DUPE_INCOMING" in engine._store.items);
+
+ let collection = server.getCollection(user, "rotary");
+ Assert.equal(1, collection.count());
+ Assert.notEqual(undefined, collection.wbo("DUPE_INCOMING"));
+
+ await cleanAndGo(engine, server);
+});
+
+add_task(async function test_processIncoming_reconcile_equivalent() {
+ _("Ensure proper handling of incoming records that match local.");
+
+ let [engine, server, user] = await createServerAndConfigureClient();
+
+ let now = Date.now() / 1000 - 10;
+ await engine.setLastSync(now);
+ engine.lastModified = now + 1;
+
+ let record = encryptPayload({ id: "entry", denomination: "denomination" });
+ let wbo = new ServerWBO("entry", record, now + 2);
+ server.insertWBO(user, "rotary", wbo);
+
+ engine._store.items = { entry: "denomination" };
+ Assert.ok(await engine._store.itemExists("entry"));
+
+ await engine._sync();
+
+ do_check_attribute_count(engine._store.items, 1);
+
+ await cleanAndGo(engine, server);
+});
+
+add_task(
+ async function test_processIncoming_reconcile_locally_deleted_dupe_new() {
+ _(
+ "Ensure locally deleted duplicate record newer than incoming is handled."
+ );
+
+ // This is a somewhat complicated test. It ensures that if a client
+ // receives a modified record that duplicates an item deleted locally
+ // under a different ID, and the local deletion is newer, the incoming
+ // record is ignored. This is a corner case for record handling, but it
+ // needs to be supported.
+ let [engine, server, user] = await createServerAndConfigureClient();
+
+ let now = Date.now() / 1000 - 10;
+ await engine.setLastSync(now);
+ engine.lastModified = now + 1;
+
+ let record = encryptPayload({
+ id: "DUPE_INCOMING",
+ denomination: "incoming",
+ });
+ let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
+ server.insertWBO(user, "rotary", wbo);
+
+ // Simulate a locally-deleted item.
+ engine._store.items = {};
+ await engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
+ Assert.equal(false, await engine._store.itemExists("DUPE_LOCAL"));
+ Assert.equal(false, await engine._store.itemExists("DUPE_INCOMING"));
+ Assert.equal("DUPE_LOCAL", await engine._findDupe({ id: "DUPE_INCOMING" }));
+
+ engine.lastModified = server.getCollection(user, engine.name).timestamp;
+ await engine._sync();
+
+ // After the sync, the server's payload for DUPE_INCOMING should be
+ // marked as deleted.
+ do_check_empty(engine._store.items);
+ let collection = server.getCollection(user, "rotary");
+ Assert.equal(1, collection.count());
+ wbo = collection.wbo("DUPE_INCOMING");
+ Assert.notEqual(null, wbo);
+ let payload = wbo.getCleartext();
+ Assert.ok(payload.deleted);
+
+ await cleanAndGo(engine, server);
+ }
+);
+
+add_task(
+ async function test_processIncoming_reconcile_locally_deleted_dupe_old() {
+ _(
+ "Ensure locally deleted duplicate record older than incoming is restored."
+ );
+
+ // This is similar to the above test except it tests the condition where the
+ // incoming record is newer than the local deletion, therefore overriding it.
+
+ let [engine, server, user] = await createServerAndConfigureClient();
+
+ let now = Date.now() / 1000 - 10;
+ await engine.setLastSync(now);
+ engine.lastModified = now + 1;
+
+ let record = encryptPayload({
+ id: "DUPE_INCOMING",
+ denomination: "incoming",
+ });
+ let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
+ server.insertWBO(user, "rotary", wbo);
+
+ // Simulate a locally-deleted item.
+ engine._store.items = {};
+ await engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
+ Assert.equal(false, await engine._store.itemExists("DUPE_LOCAL"));
+ Assert.equal(false, await engine._store.itemExists("DUPE_INCOMING"));
+ Assert.equal("DUPE_LOCAL", await engine._findDupe({ id: "DUPE_INCOMING" }));
+
+ await engine._sync();
+
+ // Since the remote change is newer, the incoming item should exist locally.
+ do_check_attribute_count(engine._store.items, 1);
+ Assert.ok("DUPE_INCOMING" in engine._store.items);
+ Assert.equal("incoming", engine._store.items.DUPE_INCOMING);
+
+ let collection = server.getCollection(user, "rotary");
+ Assert.equal(1, collection.count());
+ wbo = collection.wbo("DUPE_INCOMING");
+ let payload = wbo.getCleartext();
+ Assert.equal("incoming", payload.denomination);
+
+ await cleanAndGo(engine, server);
+ }
+);
+
+add_task(async function test_processIncoming_reconcile_changed_dupe() {
+ _("Ensure that locally changed duplicate record is handled properly.");
+
+ let [engine, server, user] = await createServerAndConfigureClient();
+
+ let now = Date.now() / 1000 - 10;
+ await engine.setLastSync(now);
+ engine.lastModified = now + 1;
+
+ // The local record is newer than the incoming one, so it should be retained.
+ let record = encryptPayload({
+ id: "DUPE_INCOMING",
+ denomination: "incoming",
+ });
+ let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
+ server.insertWBO(user, "rotary", wbo);
+
+ await engine._store.create({ id: "DUPE_LOCAL", denomination: "local" });
+ await engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
+ Assert.ok(await engine._store.itemExists("DUPE_LOCAL"));
+ Assert.equal("DUPE_LOCAL", await engine._findDupe({ id: "DUPE_INCOMING" }));
+
+ engine.lastModified = server.getCollection(user, engine.name).timestamp;
+ await engine._sync();
+
+ // The ID should have been changed to incoming.
+ do_check_attribute_count(engine._store.items, 1);
+ Assert.ok("DUPE_INCOMING" in engine._store.items);
+
+ // On the server, the local ID should be deleted and the incoming ID should
+ // have its payload set to what was in the local record.
+ let collection = server.getCollection(user, "rotary");
+ Assert.equal(1, collection.count());
+ wbo = collection.wbo("DUPE_INCOMING");
+ Assert.notEqual(undefined, wbo);
+ let payload = wbo.getCleartext();
+ Assert.equal("local", payload.denomination);
+
+ await cleanAndGo(engine, server);
+});
+
+add_task(async function test_processIncoming_reconcile_changed_dupe_new() {
+ _("Ensure locally changed duplicate record older than incoming is ignored.");
+
+ // This test is similar to the above except the incoming record is newer
+ // than the local record, so the incoming record should be authoritative.
+ let [engine, server, user] = await createServerAndConfigureClient();
+
+ let now = Date.now() / 1000 - 10;
+ await engine.setLastSync(now);
+ engine.lastModified = now + 1;
+
+ let record = encryptPayload({
+ id: "DUPE_INCOMING",
+ denomination: "incoming",
+ });
+ let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
+ server.insertWBO(user, "rotary", wbo);
+
+ await engine._store.create({ id: "DUPE_LOCAL", denomination: "local" });
+ await engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
+ Assert.ok(await engine._store.itemExists("DUPE_LOCAL"));
+ Assert.equal("DUPE_LOCAL", await engine._findDupe({ id: "DUPE_INCOMING" }));
+
+ engine.lastModified = server.getCollection(user, engine.name).timestamp;
+ await engine._sync();
+
+ // The ID should have been changed to incoming.
+ do_check_attribute_count(engine._store.items, 1);
+ Assert.ok("DUPE_INCOMING" in engine._store.items);
+
+ // On the server, the local ID should be deleted and the incoming ID should
+ // have its payload retained.
+ let collection = server.getCollection(user, "rotary");
+ Assert.equal(1, collection.count());
+ wbo = collection.wbo("DUPE_INCOMING");
+ Assert.notEqual(undefined, wbo);
+ let payload = wbo.getCleartext();
+ Assert.equal("incoming", payload.denomination);
+ await cleanAndGo(engine, server);
+});
+
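+// Across the changed-dupe scenarios above, the assumed invariant is
+// (illustrative pseudo-code, not the real API):
+//
+//   // The incoming ID always survives; the newer side supplies the payload;
+//   // the local duplicate ID gets deleted on the server.
+//   let keepLocal = localChangeTime > incoming.modified;
+//   store[incoming.id] = keepLocal ? store[localDupeId] : incoming.payload;
+//   delete store[localDupeId];
+//   deleteOnServer(localDupeId);
+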
+add_task(async function test_processIncoming_resume_toFetch() {
+ _(
+ "toFetch and previousFailed items left over from previous syncs are fetched on the next sync, along with new items."
+ );
+
+ const LASTSYNC = Date.now() / 1000;
+
+ // Server records that will be downloaded
+ let collection = new ServerCollection();
+ collection.insert(
+ "flying",
+ encryptPayload({ id: "flying", denomination: "LNER Class A3 4472" })
+ );
+ collection.insert(
+ "scotsman",
+ encryptPayload({ id: "scotsman", denomination: "Flying Scotsman" })
+ );
+ collection.insert(
+ "rekolok",
+ encryptPayload({ id: "rekolok", denomination: "Rekonstruktionslokomotive" })
+ );
+ for (let i = 0; i < 3; i++) {
+ let id = "failed" + i;
+ let payload = encryptPayload({ id, denomination: "Record No. " + i });
+ let wbo = new ServerWBO(id, payload);
+ wbo.modified = LASTSYNC - 10;
+ collection.insertWBO(wbo);
+ }
+
+ collection.wbo("flying").modified = collection.wbo("scotsman").modified =
+ LASTSYNC - 10;
+ collection._wbos.rekolok.modified = LASTSYNC + 10;
+
+ // The WBOs stamped before lastSync would normally be skipped; listing
+ // them in toFetch and previousFailed forces them to be downloaded anyway.
+ let engine = makeRotaryEngine();
+ await engine.setLastSync(LASTSYNC);
+ engine.toFetch = new SerializableSet(["flying", "scotsman"]);
+ engine.previousFailed = new SerializableSet([
+ "failed0",
+ "failed1",
+ "failed2",
+ ]);
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+ try {
+ // Confirm initial environment
+ Assert.equal(engine._store.items.flying, undefined);
+ Assert.equal(engine._store.items.scotsman, undefined);
+ Assert.equal(engine._store.items.rekolok, undefined);
+
+ await engine._syncStartup();
+ await engine._processIncoming();
+
+ // Local records have been created from the server data.
+ Assert.equal(engine._store.items.flying, "LNER Class A3 4472");
+ Assert.equal(engine._store.items.scotsman, "Flying Scotsman");
+ Assert.equal(engine._store.items.rekolok, "Rekonstruktionslokomotive");
+ Assert.equal(engine._store.items.failed0, "Record No. 0");
+ Assert.equal(engine._store.items.failed1, "Record No. 1");
+ Assert.equal(engine._store.items.failed2, "Record No. 2");
+ Assert.equal(engine.previousFailed.size, 0);
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
+add_task(async function test_processIncoming_notify_count() {
+ _("Ensure that failed records are reported only once.");
+
+ const NUMBER_OF_RECORDS = 15;
+
+ // Engine that fails every 5 records.
+ let engine = makeRotaryEngine();
+ engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
+ engine._store.applyIncomingBatch = async function (records, countTelemetry) {
+ let sortedRecords = records.sort((a, b) => (a.id > b.id ? 1 : -1));
+ let recordsToApply = [],
+ recordsToFail = [];
+ for (let i = 0; i < sortedRecords.length; i++) {
+ (i % 5 === 0 ? recordsToFail : recordsToApply).push(sortedRecords[i]);
+ }
+ recordsToFail.forEach(() => {
+ countTelemetry.addIncomingFailedReason("failed message");
+ });
+ await engine._store._applyIncomingBatch(recordsToApply, countTelemetry);
+
+ return recordsToFail.map(record => record.id);
+ };
+
+ // Create a batch of server side records.
+ let collection = new ServerCollection();
+ for (var i = 0; i < NUMBER_OF_RECORDS; i++) {
+ let id = "record-no-" + i.toString(10).padStart(2, "0");
+ let payload = encryptPayload({ id, denomination: "Record No. " + id });
+ collection.insert(id, payload);
+ }
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+ try {
+ // Confirm initial environment.
+ Assert.equal(await engine.getLastSync(), 0);
+ Assert.equal(engine.toFetch.size, 0);
+ Assert.equal(engine.previousFailed.size, 0);
+ do_check_empty(engine._store.items);
+
+ let called = 0;
+ let counts;
+ function onApplied(count) {
+ _("Called with " + JSON.stringify(counts));
+ counts = count;
+ called++;
+ }
+ Svc.Obs.add("weave:engine:sync:applied", onApplied);
+
+ // Do sync.
+ await engine._syncStartup();
+ await engine._processIncoming();
+
+ // Confirm failures.
+ do_check_attribute_count(engine._store.items, 12);
+ Assert.deepEqual(
+ Array.from(engine.previousFailed).sort(),
+ ["record-no-00", "record-no-05", "record-no-10"].sort()
+ );
+
+ // There are newly failed records and they are reported.
+ Assert.equal(called, 1);
+ Assert.equal(counts.failed, 3);
+ Assert.equal(counts.failedReasons[0].count, 3);
+ Assert.equal(counts.failedReasons[0].name, "failed message");
+ Assert.equal(counts.applied, 15);
+ Assert.equal(counts.newFailed, 3);
+ Assert.equal(counts.succeeded, 12);
+
+ // Sync again; 1 of the previously failed items fails again, the rest succeed.
+ await engine._processIncoming();
+
+ // Confirming removed failures.
+ do_check_attribute_count(engine._store.items, 14);
+ // After failing twice, the record that failed again (record-no-00)
+ // should NOT be stored for another retry.
+ Assert.deepEqual(Array.from(engine.previousFailed), []);
+
+ Assert.equal(called, 2);
+ Assert.equal(counts.failed, 1);
+ Assert.equal(counts.failedReasons[0].count, 1);
+ Assert.equal(counts.failedReasons[0].name, "failed message");
+ Assert.equal(counts.applied, 3);
+ Assert.equal(counts.newFailed, 0);
+ Assert.equal(counts.succeeded, 2);
+
+ Svc.Obs.remove("weave:engine:sync:applied", onApplied);
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
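+// Assumed semantics of the weave:engine:sync:applied counts, consistent
+// with the assertions above:
+//
+//   applied   - records handed to the store this sync
+//   failed    - records that failed this sync
+//   newFailed - failed records that weren't already in previousFailed
+//   succeeded - applied minus failed
+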
+add_task(async function test_processIncoming_previousFailed() {
+ _("Ensure that failed records are retried.");
+
+ const NUMBER_OF_RECORDS = 14;
+
+ // Engine that alternates between failing and applying every 2 records.
+ let engine = makeRotaryEngine();
+ engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
+ engine._store.applyIncomingBatch = async function (records, countTelemetry) {
+ let sortedRecords = records.sort((a, b) => (a.id > b.id ? 1 : -1));
+ let recordsToApply = [],
+ recordsToFail = [];
+ let chunks = Array.from(PlacesUtils.chunkArray(sortedRecords, 2));
+ for (let i = 0; i < chunks.length; i++) {
+ (i % 2 === 0 ? recordsToFail : recordsToApply).push(...chunks[i]);
+ }
+ await engine._store._applyIncomingBatch(recordsToApply, countTelemetry);
+ return recordsToFail.map(record => record.id);
+ };
+
+ // Create a batch of server side records.
+ let collection = new ServerCollection();
+ for (var i = 0; i < NUMBER_OF_RECORDS; i++) {
+ let id = "record-no-" + i.toString(10).padStart(2, "0");
+ let payload = encryptPayload({ id, denomination: "Record No. " + i });
+ collection.insert(id, payload);
+ }
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+ try {
+ // Confirm initial environment.
+ Assert.equal(await engine.getLastSync(), 0);
+ Assert.equal(engine.toFetch.size, 0);
+ Assert.equal(engine.previousFailed.size, 0);
+ do_check_empty(engine._store.items);
+
+ // Initial failed items in previousFailed to be reset.
+ let previousFailed = new SerializableSet([
+ Utils.makeGUID(),
+ Utils.makeGUID(),
+ Utils.makeGUID(),
+ ]);
+ engine.previousFailed = previousFailed;
+ Assert.equal(engine.previousFailed, previousFailed);
+
+ // Do sync.
+ await engine._syncStartup();
+ await engine._processIncoming();
+
+ // Expected result: 4 failing chunks of 2 records each => 8 failures
+ do_check_attribute_count(engine._store.items, 6);
+ Assert.deepEqual(
+ Array.from(engine.previousFailed).sort(),
+ [
+ "record-no-00",
+ "record-no-01",
+ "record-no-04",
+ "record-no-05",
+ "record-no-08",
+ "record-no-09",
+ "record-no-12",
+ "record-no-13",
+ ].sort()
+ );
+
+ // Sync again; of the refetched items, records 00, 01, 08 and 09 fail again.
+ await engine._processIncoming();
+
+ do_check_attribute_count(engine._store.items, 10);
+ // Items that failed a second time are dropped rather than queued again,
+ // and items that succeeded are removed, so previousFailed is now empty.
+ Assert.deepEqual(Array.from(engine.previousFailed).sort(), []);
+
+ // Refetched items that didn't fail the second time are in engine._store.items.
+ Assert.equal(engine._store.items["record-no-04"], "Record No. 4");
+ Assert.equal(engine._store.items["record-no-05"], "Record No. 5");
+ Assert.equal(engine._store.items["record-no-12"], "Record No. 12");
+ Assert.equal(engine._store.items["record-no-13"], "Record No. 13");
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
+add_task(async function test_processIncoming_failed_records() {
+ _(
+ "Ensure that failed records from _reconcile and applyIncomingBatch are refetched."
+ );
+
+ // Let's create three and a bit batches worth of server side records.
+ let APPLY_BATCH_SIZE = 50;
+ let collection = new ServerCollection();
+ const NUMBER_OF_RECORDS = APPLY_BATCH_SIZE * 3 + 5;
+ for (let i = 0; i < NUMBER_OF_RECORDS; i++) {
+ let id = "record-no-" + i;
+ let payload = encryptPayload({ id, denomination: "Record No. " + id });
+ let wbo = new ServerWBO(id, payload);
+ wbo.modified = Date.now() / 1000 + 60 * (i - APPLY_BATCH_SIZE * 3);
+ collection.insertWBO(wbo);
+ }
+
+ // Engine that batches but likes to throw on a couple of records,
+ // two in each batch: the even ones fail in reconcile, the odd ones
+ // in applyIncoming.
+ const BOGUS_RECORDS = [
+ "record-no-" + 42,
+ "record-no-" + 23,
+ "record-no-" + (42 + APPLY_BATCH_SIZE),
+ "record-no-" + (23 + APPLY_BATCH_SIZE),
+ "record-no-" + (42 + APPLY_BATCH_SIZE * 2),
+ "record-no-" + (23 + APPLY_BATCH_SIZE * 2),
+ "record-no-" + (2 + APPLY_BATCH_SIZE * 3),
+ "record-no-" + (1 + APPLY_BATCH_SIZE * 3),
+ ];
+ let engine = makeRotaryEngine();
+
+ engine.__reconcile = engine._reconcile;
+ engine._reconcile = async function _reconcile(record) {
+ if (BOGUS_RECORDS.indexOf(record.id) % 2 == 0) {
+ throw new Error("I don't like this record! Baaaaaah!");
+ }
+ return this.__reconcile.apply(this, arguments);
+ };
+ engine._store._applyIncoming = engine._store.applyIncoming;
+ engine._store.applyIncoming = async function (record) {
+ if (BOGUS_RECORDS.indexOf(record.id) % 2 == 1) {
+ throw new Error("I don't like this record! Baaaaaah!");
+ }
+ return this._applyIncoming.apply(this, arguments);
+ };
+
+ // Keep track of requests made of a collection.
+ let count = 0;
+ let uris = [];
+ function recording_handler(recordedCollection) {
+ let h = recordedCollection.handler();
+ return function (req, res) {
+ ++count;
+ uris.push(req.path + "?" + req.queryString);
+ return h(req, res);
+ };
+ }
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": recording_handler(collection),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+
+ try {
+ // Confirm initial environment
+ Assert.equal(await engine.getLastSync(), 0);
+ Assert.equal(engine.toFetch.size, 0);
+ Assert.equal(engine.previousFailed.size, 0);
+ do_check_empty(engine._store.items);
+
+ let observerSubject;
+ let observerData;
+ Svc.Obs.add("weave:engine:sync:applied", function onApplied(subject, data) {
+ Svc.Obs.remove("weave:engine:sync:applied", onApplied);
+ observerSubject = subject;
+ observerData = data;
+ });
+
+ await engine._syncStartup();
+ await engine._processIncoming();
+
+ // Ensure that all records but the bogus 4 have been applied.
+ do_check_attribute_count(
+ engine._store.items,
+ NUMBER_OF_RECORDS - BOGUS_RECORDS.length
+ );
+
+ // Ensure that the bogus records will be fetched again on the next sync.
+ Assert.equal(engine.previousFailed.size, BOGUS_RECORDS.length);
+ Assert.deepEqual(
+ Array.from(engine.previousFailed).sort(),
+ BOGUS_RECORDS.sort()
+ );
+
+ // Ensure the observer was notified
+ Assert.equal(observerData, engine.name);
+ Assert.equal(observerSubject.failed, BOGUS_RECORDS.length);
+ Assert.equal(observerSubject.newFailed, BOGUS_RECORDS.length);
+
+ // Testing batching of failed item fetches.
+ // Try to sync again. Ensure that we split the request into chunks to avoid
+ // URI length limitations.
+ async function batchDownload(batchSize) {
+ count = 0;
+ uris = [];
+ engine.guidFetchBatchSize = batchSize;
+ await engine._processIncoming();
+ _("Tried again. Requests: " + count + "; URIs: " + JSON.stringify(uris));
+ return count;
+ }
+
+ // There are 8 bad records, so this needs 3 fetches.
+ _("Test batching with ID batch size 3, normal mobile batch size.");
+ Assert.equal(await batchDownload(3), 3);
+
+ // Since the previous batch failed again, there should be
+ // no more records to fetch.
+ _("Test that the second time a record failed to sync, gets ignored");
+ Assert.equal(await batchDownload(BOGUS_RECORDS.length), 0);
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
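+// A minimal sketch of the refetch batching tested above, assuming failed
+// GUIDs are chunked to keep the ?ids= query string short (illustrative):
+//
+//   for (let chunk of PlacesUtils.chunkArray(guids, engine.guidFetchBatchSize)) {
+//     await fetchRecords(chunk); // one GET .../storage/rotary?ids=... per chunk
+//   }
+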
+add_task(async function test_processIncoming_decrypt_failed() {
+ _("Ensure that records failing to decrypt are either replaced or refetched.");
+
+ // Some good and some bogus records. One doesn't contain valid JSON,
+ // the other will throw during decrypt.
+ let collection = new ServerCollection();
+ collection._wbos.flying = new ServerWBO(
+ "flying",
+ encryptPayload({ id: "flying", denomination: "LNER Class A3 4472" })
+ );
+ collection._wbos.nojson = new ServerWBO("nojson", "This is invalid JSON");
+ collection._wbos.nojson2 = new ServerWBO("nojson2", "This is invalid JSON");
+ collection._wbos.scotsman = new ServerWBO(
+ "scotsman",
+ encryptPayload({ id: "scotsman", denomination: "Flying Scotsman" })
+ );
+ collection._wbos.nodecrypt = new ServerWBO("nodecrypt", "Decrypt this!");
+ collection._wbos.nodecrypt2 = new ServerWBO("nodecrypt2", "Decrypt this!");
+
+ // Patch the fake crypto service to throw on the record above.
+ Weave.Crypto._decrypt = Weave.Crypto.decrypt;
+ Weave.Crypto.decrypt = function (ciphertext) {
+ if (ciphertext == "Decrypt this!") {
+ throw new Error(
+ "Derp! Cipher finalized failed. Im ur crypto destroyin ur recordz."
+ );
+ }
+ return this._decrypt.apply(this, arguments);
+ };
+
+ // Some broken records also exist locally.
+ let engine = makeRotaryEngine();
+ engine.enabled = true;
+ engine._store.items = { nojson: "Valid JSON", nodecrypt: "Valid ciphertext" };
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+ try {
+ // Confirm initial state
+ Assert.equal(engine.toFetch.size, 0);
+ Assert.equal(engine.previousFailed.size, 0);
+
+ let observerSubject;
+ let observerData;
+ Svc.Obs.add("weave:engine:sync:applied", function onApplied(subject, data) {
+ Svc.Obs.remove("weave:engine:sync:applied", onApplied);
+ observerSubject = subject;
+ observerData = data;
+ });
+
+ await engine.setLastSync(collection.wbo("nojson").modified - 1);
+ let ping = await sync_engine_and_validate_telem(engine, true);
+ Assert.equal(ping.engines[0].incoming.applied, 2);
+ Assert.equal(ping.engines[0].incoming.failed, 4);
+ console.log("incoming telem: ", ping.engines[0].incoming);
+ Assert.equal(
+ ping.engines[0].incoming.failedReasons[0].name,
+ "No ciphertext: nothing to decrypt?"
+ );
+ // There should be 4 of the same error
+ Assert.equal(ping.engines[0].incoming.failedReasons[0].count, 4);
+
+ Assert.equal(engine.previousFailed.size, 4);
+ Assert.ok(engine.previousFailed.has("nojson"));
+ Assert.ok(engine.previousFailed.has("nojson2"));
+ Assert.ok(engine.previousFailed.has("nodecrypt"));
+ Assert.ok(engine.previousFailed.has("nodecrypt2"));
+
+ // Ensure the observer was notified
+ Assert.equal(observerData, engine.name);
+ Assert.equal(observerSubject.applied, 2);
+ Assert.equal(observerSubject.failed, 4);
+ Assert.equal(observerSubject.failedReasons[0].count, 4);
+ } finally {
+ await promiseClean(engine, server);
+ }
+});
+
+add_task(async function test_uploadOutgoing_toEmptyServer() {
+ _("SyncEngine._uploadOutgoing uploads new records to server");
+
+ let collection = new ServerCollection();
+ collection._wbos.flying = new ServerWBO("flying");
+ collection._wbos.scotsman = new ServerWBO("scotsman");
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ "/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
+ "/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ let engine = makeRotaryEngine();
+ engine._store.items = {
+ flying: "LNER Class A3 4472",
+ scotsman: "Flying Scotsman",
+ };
+ // Mark one of these records as changed
+ await engine._tracker.addChangedID("scotsman", 0);
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+
+ try {
+ await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
+
+ // Confirm initial environment
+ Assert.equal(collection.payload("flying"), undefined);
+ Assert.equal(collection.payload("scotsman"), undefined);
+
+ await engine._syncStartup();
+ await engine._uploadOutgoing();
+
+ // Ensure the marked record ('scotsman') has been uploaded and is
+ // no longer marked.
+ Assert.equal(collection.payload("flying"), undefined);
+ Assert.ok(!!collection.payload("scotsman"));
+ Assert.equal(collection.cleartext("scotsman").id, "scotsman");
+ const changes = await engine._tracker.getChangedIDs();
+ Assert.equal(changes.scotsman, undefined);
+
+ // The 'flying' record wasn't marked so it wasn't uploaded
+ Assert.equal(collection.payload("flying"), undefined);
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
+async function test_uploadOutgoing_max_record_payload_bytes(
+ allowSkippedRecord
+) {
+ _(
+ "SyncEngine._uploadOutgoing throws when payload is bigger than max_record_payload_bytes"
+ );
+ let collection = new ServerCollection();
+ collection._wbos.flying = new ServerWBO("flying");
+ collection._wbos.scotsman = new ServerWBO("scotsman");
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ "/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
+ "/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ let engine = makeRotaryEngine();
+ engine.allowSkippedRecord = allowSkippedRecord;
+ engine._store.items = { flying: "a".repeat(1024 * 1024), scotsman: "abcd" };
+
+ await engine._tracker.addChangedID("flying", 1000);
+ await engine._tracker.addChangedID("scotsman", 1000);
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+
+ try {
+ await engine.setLastSync(1); // needs to be non-zero so that tracker is queried
+
+ // Confirm initial environment
+ Assert.equal(collection.payload("flying"), undefined);
+ Assert.equal(collection.payload("scotsman"), undefined);
+
+ await engine._syncStartup();
+ await engine._uploadOutgoing();
+
+ if (!allowSkippedRecord) {
+ do_throw("should not get here");
+ }
+
+ await engine.trackRemainingChanges();
+
+ // Check we uploaded the other record to the server
+ Assert.ok(collection.payload("scotsman"));
+ // And that we won't try to upload the huge record next time.
+ const changes = await engine._tracker.getChangedIDs();
+ Assert.equal(changes.flying, undefined);
+ } catch (e) {
+ if (allowSkippedRecord) {
+ do_throw("should not get here");
+ }
+
+ await engine.trackRemainingChanges();
+
+ // Check that we will try to upload the huge record next time
+ const changes = await engine._tracker.getChangedIDs();
+ Assert.equal(changes.flying, 1000);
+ } finally {
+ // Check we didn't upload the oversized record to the server
+ Assert.equal(collection.payload("flying"), undefined);
+ await cleanAndGo(engine, server);
+ }
+}
+
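+// The assumed contract exercised by the two wrappers below:
+//
+//   allowSkippedRecord == true  -> the oversized record is skipped and
+//                                  untracked; the rest still uploads
+//   allowSkippedRecord == false -> _uploadOutgoing throws; the oversized
+//                                  record stays tracked for the next sync
+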
+add_task(
+ async function test_uploadOutgoing_max_record_payload_bytes_disallowSkippedRecords() {
+ return test_uploadOutgoing_max_record_payload_bytes(false);
+ }
+);
+
+add_task(
+ async function test_uploadOutgoing_max_record_payload_bytes_allowSkippedRecords() {
+ return test_uploadOutgoing_max_record_payload_bytes(true);
+ }
+);
+
+add_task(async function test_uploadOutgoing_failed() {
+ _(
+ "SyncEngine._uploadOutgoing doesn't clear the tracker of objects that failed to upload."
+ );
+
+ let collection = new ServerCollection();
+ // We only define the "flying" WBO on the server, not the "scotsman"
+ // and "peppercorn" ones.
+ collection._wbos.flying = new ServerWBO("flying");
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ let engine = makeRotaryEngine();
+ engine._store.items = {
+ flying: "LNER Class A3 4472",
+ scotsman: "Flying Scotsman",
+ peppercorn: "Peppercorn Class",
+ };
+ // Mark these records as changed
+ const FLYING_CHANGED = 12345;
+ const SCOTSMAN_CHANGED = 23456;
+ const PEPPERCORN_CHANGED = 34567;
+ await engine._tracker.addChangedID("flying", FLYING_CHANGED);
+ await engine._tracker.addChangedID("scotsman", SCOTSMAN_CHANGED);
+ await engine._tracker.addChangedID("peppercorn", PEPPERCORN_CHANGED);
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+
+ try {
+ await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
+
+ // Confirm initial environment
+ Assert.equal(collection.payload("flying"), undefined);
+ let changes = await engine._tracker.getChangedIDs();
+ Assert.equal(changes.flying, FLYING_CHANGED);
+ Assert.equal(changes.scotsman, SCOTSMAN_CHANGED);
+ Assert.equal(changes.peppercorn, PEPPERCORN_CHANGED);
+
+ engine.enabled = true;
+ await sync_engine_and_validate_telem(engine, true);
+
+ // Ensure the 'flying' record has been uploaded and is no longer marked.
+ Assert.ok(!!collection.payload("flying"));
+ changes = await engine._tracker.getChangedIDs();
+ Assert.equal(changes.flying, undefined);
+
+ // The 'scotsman' and 'peppercorn' records couldn't be uploaded so
+ // they weren't cleared from the tracker.
+ Assert.equal(changes.scotsman, SCOTSMAN_CHANGED);
+ Assert.equal(changes.peppercorn, PEPPERCORN_CHANGED);
+ } finally {
+ await promiseClean(engine, server);
+ }
+});
+
+async function createRecordFailTelemetry(allowSkippedRecord) {
+ Services.prefs.setStringPref("services.sync.username", "foo");
+ let collection = new ServerCollection();
+ collection._wbos.flying = new ServerWBO("flying");
+ collection._wbos.scotsman = new ServerWBO("scotsman");
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ let engine = makeRotaryEngine();
+ engine.allowSkippedRecord = allowSkippedRecord;
+ let oldCreateRecord = engine._store.createRecord;
+ engine._store.createRecord = async (id, col) => {
+ if (id != "flying") {
+ throw new Error("oops");
+ }
+ return oldCreateRecord.call(engine._store, id, col);
+ };
+ engine._store.items = {
+ flying: "LNER Class A3 4472",
+ scotsman: "Flying Scotsman",
+ };
+ // Mark these records as changed
+ const FLYING_CHANGED = 12345;
+ const SCOTSMAN_CHANGED = 23456;
+ await engine._tracker.addChangedID("flying", FLYING_CHANGED);
+ await engine._tracker.addChangedID("scotsman", SCOTSMAN_CHANGED);
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+
+ let ping;
+ try {
+ await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
+
+ // Confirm initial environment
+ Assert.equal(collection.payload("flying"), undefined);
+ let changes = await engine._tracker.getChangedIDs();
+ Assert.equal(changes.flying, FLYING_CHANGED);
+ Assert.equal(changes.scotsman, SCOTSMAN_CHANGED);
+
+ engine.enabled = true;
+ ping = await sync_engine_and_validate_telem(engine, true, onErrorPing => {
+ ping = onErrorPing;
+ });
+
+ if (!allowSkippedRecord) {
+ do_throw("should not get here");
+ }
+
+ // Ensure the 'flying' record has been uploaded and is no longer marked.
+ Assert.ok(!!collection.payload("flying"));
+ changes = await engine._tracker.getChangedIDs();
+ Assert.equal(changes.flying, undefined);
+ } catch (err) {
+ if (allowSkippedRecord) {
+ do_throw("should not get here");
+ }
+
+ // Ensure the 'flying' record has not been uploaded and is still marked
+ Assert.ok(!collection.payload("flying"));
+ const changes = await engine._tracker.getChangedIDs();
+ Assert.ok(changes.flying);
+ } finally {
+ // We reported in telemetry that we failed a record
+ Assert.equal(ping.engines[0].outgoing[0].failed, 1);
+ Assert.equal(ping.engines[0].outgoing[0].failedReasons[0].name, "oops");
+
+ // In any case, the 'scotsman' record couldn't be created so it wasn't
+ // uploaded, nor was it cleared from the tracker.
+ Assert.ok(!collection.payload("scotsman"));
+ const changes = await engine._tracker.getChangedIDs();
+ Assert.equal(changes.scotsman, SCOTSMAN_CHANGED);
+
+ engine._store.createRecord = oldCreateRecord;
+ await promiseClean(engine, server);
+ }
+}
+
+add_task(
+ async function test_uploadOutgoing_createRecord_throws_reported_telemetry() {
+ _(
+ "SyncEngine._uploadOutgoing reports a failed record to telemetry if createRecord throws"
+ );
+ await createRecordFailTelemetry(true);
+ }
+);
+
+add_task(
+ async function test_uploadOutgoing_createRecord_throws_dontAllowSkipRecord() {
+ _(
+ "SyncEngine._uploadOutgoing will throw if createRecord throws and allowSkipRecord is set to false"
+ );
+ await createRecordFailTelemetry(false);
+ }
+);
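+
+// Taken together, the two tasks above cover both modes of
+// createRecordFailTelemetry: with allowSkippedRecord the failing record
+// is skipped, reported to telemetry, and the sync still succeeds;
+// without it the same failure aborts the sync and nothing is cleared
+// from the tracker.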
+
+add_task(async function test_uploadOutgoing_largeRecords() {
+ _(
+ "SyncEngine._uploadOutgoing throws on records larger than the max record payload size"
+ );
+
+ let collection = new ServerCollection();
+
+ let engine = makeRotaryEngine();
+ engine.allowSkippedRecord = false;
+ engine._store.items["large-item"] = "Y".repeat(
+ Service.getMaxRecordPayloadSize() * 2
+ );
+ await engine._tracker.addChangedID("large-item", 0);
+ collection.insert("large-item");
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ try {
+ await engine._syncStartup();
+ let error = null;
+ try {
+ await engine._uploadOutgoing();
+ } catch (e) {
+ error = e;
+ }
+ ok(!!error);
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
+add_task(async function test_syncFinish_deleteByIds() {
+ _(
+ "SyncEngine._syncFinish deletes server records slated for deletion (list of record IDs)."
+ );
+
+ let collection = new ServerCollection();
+ collection._wbos.flying = new ServerWBO(
+ "flying",
+ encryptPayload({ id: "flying", denomination: "LNER Class A3 4472" })
+ );
+ collection._wbos.scotsman = new ServerWBO(
+ "scotsman",
+ encryptPayload({ id: "scotsman", denomination: "Flying Scotsman" })
+ );
+ collection._wbos.rekolok = new ServerWBO(
+ "rekolok",
+ encryptPayload({ id: "rekolok", denomination: "Rekonstruktionslokomotive" })
+ );
+
+ let server = httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+ await SyncTestingInfrastructure(server);
+
+ let engine = makeRotaryEngine();
+ try {
+ engine._delete = { ids: ["flying", "rekolok"] };
+ await engine._syncFinish();
+
+ // The 'flying' and 'rekolok' records were deleted while the
+ // 'scotsman' one wasn't.
+ Assert.equal(collection.payload("flying"), undefined);
+ Assert.ok(!!collection.payload("scotsman"));
+ Assert.equal(collection.payload("rekolok"), undefined);
+
+ // The deletion todo list has been reset.
+ Assert.equal(engine._delete.ids, undefined);
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
+add_task(async function test_syncFinish_deleteLotsInBatches() {
+ _(
+ "SyncEngine._syncFinish deletes server records in batches of 100 (list of record IDs)."
+ );
+
+ let collection = new ServerCollection();
+
+ // Let's count how many times the client does a DELETE request to the server
+ var noOfUploads = 0;
+ collection.delete = (function (orig) {
+ return function () {
+ noOfUploads++;
+ return orig.apply(this, arguments);
+ };
+ })(collection.delete);
+
+ // Create a bunch of records on the server
+ let now = Date.now();
+ for (var i = 0; i < 234; i++) {
+ let id = "record-no-" + i;
+ let payload = encryptPayload({ id, denomination: "Record No. " + i });
+ let wbo = new ServerWBO(id, payload);
+ wbo.modified = now / 1000 - 60 * (i + 110);
+ collection.insertWBO(wbo);
+ }
+
+ let server = httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ let engine = makeRotaryEngine();
+ try {
+ // Confirm initial environment
+ Assert.equal(noOfUploads, 0);
+
+    // Declare what we want to have deleted: all records no. 100 and
+    // up, plus all records less than 200.5 mins old (i.e. records
+    // 0 thru 90).
+ engine._delete = { ids: [], newer: now / 1000 - 60 * 200.5 };
+ for (i = 100; i < 234; i++) {
+ engine._delete.ids.push("record-no-" + i);
+ }
+
+ await engine._syncFinish();
+
+    // Ensure that the appropriate server data has been wiped while
+    // preserving records 91 thru 99.
+ for (i = 0; i < 234; i++) {
+ let id = "record-no-" + i;
+ if (i <= 90 || i >= 100) {
+ Assert.equal(collection.payload(id), undefined);
+ } else {
+ Assert.ok(!!collection.payload(id));
+ }
+ }
+
+ // The deletion was done in batches
+ Assert.equal(noOfUploads, 2 + 1);
+
+ // The deletion todo list has been reset.
+ Assert.equal(engine._delete.ids, undefined);
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
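+
+// For reference, the request math behind `noOfUploads == 2 + 1` above
+// (a sketch): 134 explicit ids at up to 100 per DELETE need
+// Math.ceil(134 / 100) === 2 batches, plus one more DELETE carrying the
+// `newer` timestamp clause.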
+
+add_task(async function test_sync_partialUpload() {
+ _("SyncEngine.sync() keeps changedIDs that couldn't be uploaded.");
+
+ let collection = new ServerCollection();
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+ let oldServerConfiguration = Service.serverConfiguration;
+ Service.serverConfiguration = {
+ max_post_records: 100,
+ };
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ let engine = makeRotaryEngine();
+
+ // Let the third upload fail completely
+ var noOfUploads = 0;
+ collection.post = (function (orig) {
+ return function () {
+ if (noOfUploads == 2) {
+ throw new Error("FAIL!");
+ }
+ noOfUploads++;
+ return orig.apply(this, arguments);
+ };
+ })(collection.post);
+
+ // Create a bunch of records (and server side handlers)
+ for (let i = 0; i < 234; i++) {
+ let id = "record-no-" + i;
+ engine._store.items[id] = "Record No. " + i;
+ await engine._tracker.addChangedID(id, i);
+ // Let two items in the first upload batch fail.
+ if (i != 23 && i != 42) {
+ collection.insert(id);
+ }
+ }
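+  // With max_post_records = 100 this sync takes three POSTs: records
+  // 0-99 and 100-199 go through (minus the per-record rejects above),
+  // while the third POST for 200-233 fails hard via the patched post().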
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+
+ try {
+ await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
+
+ engine.enabled = true;
+ let error;
+ try {
+ await sync_engine_and_validate_telem(engine, true);
+ } catch (ex) {
+ error = ex;
+ }
+
+ ok(!!error);
+
+ const changes = await engine._tracker.getChangedIDs();
+ for (let i = 0; i < 234; i++) {
+ let id = "record-no-" + i;
+ // Ensure failed records are back in the tracker:
+ // * records no. 23 and 42 were rejected by the server,
+      // * records no. 200 and higher couldn't be uploaded because we
+      //   failed hard on the third POST.
+ if (i == 23 || i == 42 || i >= 200) {
+ Assert.equal(changes[id], i);
+ } else {
+ Assert.equal(false, id in changes);
+ }
+ }
+ } finally {
+ Service.serverConfiguration = oldServerConfiguration;
+ await promiseClean(engine, server);
+ }
+});
+
+add_task(async function test_canDecrypt_noCryptoKeys() {
+ _(
+ "SyncEngine.canDecrypt returns false if the engine fails to decrypt items on the server, e.g. due to a missing crypto key collection."
+ );
+
+ // Wipe collection keys so we can test the desired scenario.
+ Service.collectionKeys.clear();
+
+ let collection = new ServerCollection();
+ collection._wbos.flying = new ServerWBO(
+ "flying",
+ encryptPayload({ id: "flying", denomination: "LNER Class A3 4472" })
+ );
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+ let engine = makeRotaryEngine();
+ try {
+ Assert.equal(false, await engine.canDecrypt());
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
+add_task(async function test_canDecrypt_true() {
+ _(
+ "SyncEngine.canDecrypt returns true if the engine can decrypt the items on the server."
+ );
+
+ await generateNewKeys(Service.collectionKeys);
+
+ let collection = new ServerCollection();
+ collection._wbos.flying = new ServerWBO(
+ "flying",
+ encryptPayload({ id: "flying", denomination: "LNER Class A3 4472" })
+ );
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+ let engine = makeRotaryEngine();
+ try {
+ Assert.ok(await engine.canDecrypt());
+ } finally {
+ await cleanAndGo(engine, server);
+ }
+});
+
+add_task(async function test_syncapplied_observer() {
+ const NUMBER_OF_RECORDS = 10;
+
+ let engine = makeRotaryEngine();
+
+ // Create a batch of server side records.
+ let collection = new ServerCollection();
+ for (var i = 0; i < NUMBER_OF_RECORDS; i++) {
+ let id = "record-no-" + i;
+ let payload = encryptPayload({ id, denomination: "Record No. " + id });
+ collection.insert(id, payload);
+ }
+
+ let server = httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+
+ let numApplyCalls = 0;
+ let engine_name;
+ let count;
+ function onApplied(subject, data) {
+ numApplyCalls++;
+ engine_name = data;
+ count = subject;
+ }
+
+ Svc.Obs.add("weave:engine:sync:applied", onApplied);
+
+ try {
+ Service.scheduler.hasIncomingItems = false;
+
+ // Do sync.
+ await engine._syncStartup();
+ await engine._processIncoming();
+
+ do_check_attribute_count(engine._store.items, 10);
+
+ Assert.equal(numApplyCalls, 1);
+ Assert.equal(engine_name, "rotary");
+ Assert.equal(count.applied, 10);
+
+ Assert.ok(Service.scheduler.hasIncomingItems);
+ } finally {
+ await cleanAndGo(engine, server);
+ Service.scheduler.hasIncomingItems = false;
+ Svc.Obs.remove("weave:engine:sync:applied", onApplied);
+ }
+});
diff --git a/services/sync/tests/unit/test_syncscheduler.js b/services/sync/tests/unit/test_syncscheduler.js
new file mode 100644
index 0000000000..98b7937da3
--- /dev/null
+++ b/services/sync/tests/unit/test_syncscheduler.js
@@ -0,0 +1,1195 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { FxAccounts } = ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+);
+const { SyncAuthManager } = ChromeUtils.importESModule(
+ "resource://services-sync/sync_auth.sys.mjs"
+);
+const { SyncScheduler } = ChromeUtils.importESModule(
+ "resource://services-sync/policies.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { Status } = ChromeUtils.importESModule(
+ "resource://services-sync/status.sys.mjs"
+);
+
+function CatapultEngine() {
+ SyncEngine.call(this, "Catapult", Service);
+}
+CatapultEngine.prototype = {
+ exception: null, // tests fill this in
+ async _sync() {
+ throw this.exception;
+ },
+};
+Object.setPrototypeOf(CatapultEngine.prototype, SyncEngine.prototype);
+
+var scheduler = new SyncScheduler(Service);
+let clientsEngine;
+
+async function sync_httpd_setup() {
+ let clientsSyncID = await clientsEngine.resetLocalSyncID();
+ let global = new ServerWBO("global", {
+ syncID: Service.syncID,
+ storageVersion: STORAGE_VERSION,
+ engines: {
+ clients: { version: clientsEngine.version, syncID: clientsSyncID },
+ },
+ });
+ let clientsColl = new ServerCollection({}, true);
+
+ // Tracking info/collections.
+ let collectionsHelper = track_collections_helper();
+ let upd = collectionsHelper.with_updated_collection;
+
+ return httpd_setup({
+ "/1.1/johndoe@mozilla.com/storage/meta/global": upd(
+ "meta",
+ global.handler()
+ ),
+ "/1.1/johndoe@mozilla.com/info/collections": collectionsHelper.handler,
+ "/1.1/johndoe@mozilla.com/storage/crypto/keys": upd(
+ "crypto",
+ new ServerWBO("keys").handler()
+ ),
+ "/1.1/johndoe@mozilla.com/storage/clients": upd(
+ "clients",
+ clientsColl.handler()
+ ),
+ });
+}
+
+async function setUp(server) {
+ await configureIdentity({ username: "johndoe@mozilla.com" }, server);
+
+ await generateNewKeys(Service.collectionKeys);
+ let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
+ await serverKeys.encrypt(Service.identity.syncKeyBundle);
+ let result = (
+ await serverKeys.upload(Service.resource(Service.cryptoKeysURL))
+ ).success;
+ return result;
+}
+
+async function cleanUpAndGo(server) {
+ await Async.promiseYield();
+ await clientsEngine._store.wipe();
+ await Service.startOver();
+ // Re-enable logging, which we just disabled.
+ syncTestLogging();
+ if (server) {
+ await promiseStopServer(server);
+ }
+}
+
+add_task(async function setup() {
+ await Service.promiseInitialized;
+ clientsEngine = Service.clientsEngine;
+ // Don't remove stale clients when syncing. This is a test-only workaround
+ // that lets us add clients directly to the store, without losing them on
+ // the next sync.
+ clientsEngine._removeRemoteClient = async id => {};
+ await Service.engineManager.clear();
+
+ validate_all_future_pings();
+
+ scheduler.setDefaults();
+
+ await Service.engineManager.register(CatapultEngine);
+});
+
+add_test(function test_prefAttributes() {
+ _("Test various attributes corresponding to preferences.");
+
+ const INTERVAL = 42 * 60 * 1000; // 42 minutes
+ const THRESHOLD = 3142;
+ const SCORE = 2718;
+ const TIMESTAMP1 = 1275493471649;
+
+ _(
+ "The 'nextSync' attribute stores a millisecond timestamp rounded down to the nearest second."
+ );
+ Assert.equal(scheduler.nextSync, 0);
+ scheduler.nextSync = TIMESTAMP1;
+ Assert.equal(scheduler.nextSync, Math.floor(TIMESTAMP1 / 1000) * 1000);
+
+ _("'syncInterval' defaults to singleDeviceInterval.");
+ Assert.equal(
+ Svc.PrefBranch.getPrefType("syncInterval"),
+ Ci.nsIPrefBranch.PREF_INVALID
+ );
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+
+ _("'syncInterval' corresponds to a preference setting.");
+ scheduler.syncInterval = INTERVAL;
+ Assert.equal(scheduler.syncInterval, INTERVAL);
+ Assert.equal(Svc.PrefBranch.getIntPref("syncInterval"), INTERVAL);
+
+ _(
+ "'syncThreshold' corresponds to preference, defaults to SINGLE_USER_THRESHOLD"
+ );
+ Assert.equal(
+ Svc.PrefBranch.getPrefType("syncThreshold"),
+ Ci.nsIPrefBranch.PREF_INVALID
+ );
+ Assert.equal(scheduler.syncThreshold, SINGLE_USER_THRESHOLD);
+ scheduler.syncThreshold = THRESHOLD;
+ Assert.equal(scheduler.syncThreshold, THRESHOLD);
+
+ _("'globalScore' corresponds to preference, defaults to zero.");
+ Assert.equal(Svc.PrefBranch.getIntPref("globalScore"), 0);
+ Assert.equal(scheduler.globalScore, 0);
+ scheduler.globalScore = SCORE;
+ Assert.equal(scheduler.globalScore, SCORE);
+ Assert.equal(Svc.PrefBranch.getIntPref("globalScore"), SCORE);
+
+ _("Intervals correspond to default preferences.");
+ Assert.equal(
+ scheduler.singleDeviceInterval,
+ Svc.PrefBranch.getIntPref("scheduler.fxa.singleDeviceInterval") * 1000
+ );
+ Assert.equal(
+ scheduler.idleInterval,
+ Svc.PrefBranch.getIntPref("scheduler.idleInterval") * 1000
+ );
+ Assert.equal(
+ scheduler.activeInterval,
+ Svc.PrefBranch.getIntPref("scheduler.activeInterval") * 1000
+ );
+ Assert.equal(
+ scheduler.immediateInterval,
+ Svc.PrefBranch.getIntPref("scheduler.immediateInterval") * 1000
+ );
+
+ _("Custom values for prefs will take effect after a restart.");
+ Svc.PrefBranch.setIntPref("scheduler.fxa.singleDeviceInterval", 420);
+ Svc.PrefBranch.setIntPref("scheduler.idleInterval", 230);
+ Svc.PrefBranch.setIntPref("scheduler.activeInterval", 180);
+ Svc.PrefBranch.setIntPref("scheduler.immediateInterval", 31415);
+ scheduler.setDefaults();
+ Assert.equal(scheduler.idleInterval, 230000);
+ Assert.equal(scheduler.singleDeviceInterval, 420000);
+ Assert.equal(scheduler.activeInterval, 180000);
+ Assert.equal(scheduler.immediateInterval, 31415000);
+
+ _("Custom values for interval prefs can't be less than 60 seconds.");
+ Svc.PrefBranch.setIntPref("scheduler.fxa.singleDeviceInterval", 42);
+ Svc.PrefBranch.setIntPref("scheduler.idleInterval", 50);
+ Svc.PrefBranch.setIntPref("scheduler.activeInterval", 50);
+ Svc.PrefBranch.setIntPref("scheduler.immediateInterval", 10);
+ scheduler.setDefaults();
+ Assert.equal(scheduler.idleInterval, 60000);
+ Assert.equal(scheduler.singleDeviceInterval, 60000);
+ Assert.equal(scheduler.activeInterval, 60000);
+ Assert.equal(scheduler.immediateInterval, 60000);
+
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ scheduler.setDefaults();
+ run_next_test();
+});
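+
+// The clamping asserted above is consistent with setDefaults() doing,
+// per interval pref, something of this shape (a sketch with an assumed
+// constant name, not the real implementation):
+//   const MIN_INTERVAL_SECONDS = 60; // assumed name
+//   let intervalMs =
+//     Math.max(Svc.PrefBranch.getIntPref(pref), MIN_INTERVAL_SECONDS) * 1000;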
+
+add_task(async function test_sync_skipped_low_score_no_resync() {
+ enableValidationPrefs();
+ let server = await sync_httpd_setup();
+
+ function SkipEngine() {
+ SyncEngine.call(this, "Skip", Service);
+ this.syncs = 0;
+ }
+
+ SkipEngine.prototype = {
+ _sync() {
+ do_throw("Should have been skipped");
+ },
+ shouldSkipSync() {
+ return true;
+ },
+ };
+ Object.setPrototypeOf(SkipEngine.prototype, SyncEngine.prototype);
+ await Service.engineManager.register(SkipEngine);
+
+ let engine = Service.engineManager.get("skip");
+ engine.enabled = true;
+ engine._tracker._score = 30;
+
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+
+ Assert.ok(await setUp(server));
+
+ let resyncDoneObserver = promiseOneObserver("weave:service:resyncs-finished");
+
+ let synced = false;
+ function onSyncStarted() {
+    Assert.ok(!synced, "Should only sync once");
+ synced = true;
+ }
+
+ await Service.sync();
+
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+
+ Svc.Obs.add("weave:service:sync:start", onSyncStarted);
+ await resyncDoneObserver;
+
+ Svc.Obs.remove("weave:service:sync:start", onSyncStarted);
+  engine._tracker._score = 0;
+ await cleanUpAndGo(server);
+});
+
+add_task(async function test_updateClientMode() {
+ _(
+ "Test updateClientMode adjusts scheduling attributes based on # of clients appropriately"
+ );
+ Assert.equal(scheduler.syncThreshold, SINGLE_USER_THRESHOLD);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.ok(!scheduler.idle);
+
+ // Trigger a change in interval & threshold by noting there are multiple clients.
+ Svc.PrefBranch.setIntPref("clients.devices.desktop", 1);
+ Svc.PrefBranch.setIntPref("clients.devices.mobile", 1);
+ scheduler.updateClientMode();
+
+ Assert.equal(scheduler.syncThreshold, MULTI_DEVICE_THRESHOLD);
+ Assert.equal(scheduler.syncInterval, scheduler.activeInterval);
+ Assert.ok(scheduler.numClients > 1);
+ Assert.ok(!scheduler.idle);
+
+  // Drop back to a single client: reset the engine and clear the mobile pref.
+ await clientsEngine.resetClient();
+ Svc.PrefBranch.clearUserPref("clients.devices.mobile");
+ scheduler.updateClientMode();
+
+ // Goes back to single user if # clients is 1.
+ Assert.equal(scheduler.numClients, 1);
+ Assert.equal(scheduler.syncThreshold, SINGLE_USER_THRESHOLD);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.ok(!scheduler.idle);
+
+ await cleanUpAndGo();
+});
+
+add_task(async function test_masterpassword_locked_retry_interval() {
+ enableValidationPrefs();
+
+ _(
+ "Test Status.login = MASTER_PASSWORD_LOCKED results in reschedule at MASTER_PASSWORD interval"
+ );
+ let loginFailed = false;
+ Svc.Obs.add("weave:service:login:error", function onLoginError() {
+ Svc.Obs.remove("weave:service:login:error", onLoginError);
+ loginFailed = true;
+ });
+
+ let rescheduleInterval = false;
+
+ let oldScheduleAtInterval = SyncScheduler.prototype.scheduleAtInterval;
+ SyncScheduler.prototype.scheduleAtInterval = function (interval) {
+ rescheduleInterval = true;
+ Assert.equal(interval, MASTER_PASSWORD_LOCKED_RETRY_INTERVAL);
+ };
+
+ let oldVerifyLogin = Service.verifyLogin;
+ Service.verifyLogin = async function () {
+ Status.login = MASTER_PASSWORD_LOCKED;
+ return false;
+ };
+
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ await Service.sync();
+
+ Assert.ok(loginFailed);
+ Assert.equal(Status.login, MASTER_PASSWORD_LOCKED);
+ Assert.ok(rescheduleInterval);
+
+ Service.verifyLogin = oldVerifyLogin;
+ SyncScheduler.prototype.scheduleAtInterval = oldScheduleAtInterval;
+
+ await cleanUpAndGo(server);
+});
+
+add_task(async function test_calculateBackoff() {
+ Assert.equal(Status.backoffInterval, 0);
+
+  // Test that the returned interval is capped at the maximum backoff
+  // when Status.backoffInterval is smaller than that maximum.
+ Status.backoffInterval = 5;
+ let backoffInterval = Utils.calculateBackoff(
+ 50,
+ MAXIMUM_BACKOFF_INTERVAL,
+ Status.backoffInterval
+ );
+
+ Assert.equal(backoffInterval, MAXIMUM_BACKOFF_INTERVAL);
+
+ // Test Status.backoffInterval is used if it is
+ // larger than MAXIMUM_BACKOFF_INTERVAL.
+ Status.backoffInterval = MAXIMUM_BACKOFF_INTERVAL + 10;
+ backoffInterval = Utils.calculateBackoff(
+ 50,
+ MAXIMUM_BACKOFF_INTERVAL,
+ Status.backoffInterval
+ );
+
+ Assert.equal(backoffInterval, MAXIMUM_BACKOFF_INTERVAL + 10);
+
+ await cleanUpAndGo();
+});
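+
+// Both cases above fit a backoff of this shape (a sketch; the real
+// Utils.calculateBackoff also randomizes the uncapped value):
+//   let capped = Math.min(uncappedBackoff, MAXIMUM_BACKOFF_INTERVAL);
+//   return Math.max(capped, Status.backoffInterval);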
+
+add_task(async function test_scheduleNextSync_nowOrPast() {
+ enableValidationPrefs();
+
+ let promiseObserved = promiseOneObserver("weave:service:sync:finish");
+
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ // We're late for a sync...
+ scheduler.scheduleNextSync(-1);
+ await promiseObserved;
+ await cleanUpAndGo(server);
+});
+
+add_task(async function test_scheduleNextSync_future_noBackoff() {
+ enableValidationPrefs();
+
+ _(
+ "scheduleNextSync() uses the current syncInterval if no interval is provided."
+ );
+ // Test backoffInterval is 0 as expected.
+ Assert.equal(Status.backoffInterval, 0);
+
+ _("Test setting sync interval when nextSync == 0");
+ scheduler.nextSync = 0;
+ scheduler.scheduleNextSync();
+
+ // nextSync - Date.now() might be smaller than expectedInterval
+ // since some time has passed since we called scheduleNextSync().
+ Assert.ok(scheduler.nextSync - Date.now() <= scheduler.syncInterval);
+ Assert.equal(scheduler.syncTimer.delay, scheduler.syncInterval);
+
+ _("Test setting sync interval when nextSync != 0");
+ scheduler.nextSync = Date.now() + scheduler.singleDeviceInterval;
+ scheduler.scheduleNextSync();
+
+ // nextSync - Date.now() might be smaller than expectedInterval
+ // since some time has passed since we called scheduleNextSync().
+ Assert.ok(scheduler.nextSync - Date.now() <= scheduler.syncInterval);
+ Assert.ok(scheduler.syncTimer.delay <= scheduler.syncInterval);
+
+ _(
+ "Scheduling requests for intervals larger than the current one will be ignored."
+ );
+ // Request a sync at a longer interval. The sync that's already scheduled
+ // for sooner takes precedence.
+ let nextSync = scheduler.nextSync;
+ let timerDelay = scheduler.syncTimer.delay;
+ let requestedInterval = scheduler.syncInterval * 10;
+ scheduler.scheduleNextSync(requestedInterval);
+ Assert.equal(scheduler.nextSync, nextSync);
+ Assert.equal(scheduler.syncTimer.delay, timerDelay);
+
+ // We can schedule anything we want if there isn't a sync scheduled.
+ scheduler.nextSync = 0;
+ scheduler.scheduleNextSync(requestedInterval);
+ Assert.ok(scheduler.nextSync <= Date.now() + requestedInterval);
+ Assert.equal(scheduler.syncTimer.delay, requestedInterval);
+
+ // Request a sync at the smallest possible interval (0 triggers now).
+ scheduler.scheduleNextSync(1);
+ Assert.ok(scheduler.nextSync <= Date.now() + 1);
+ Assert.equal(scheduler.syncTimer.delay, 1);
+
+ await cleanUpAndGo();
+});
+
+add_task(async function test_scheduleNextSync_future_backoff() {
+ enableValidationPrefs();
+
+ _("scheduleNextSync() will honour backoff in all scheduling requests.");
+ // Let's take a backoff interval that's bigger than the default sync interval.
+ const BACKOFF = 7337;
+ Status.backoffInterval = scheduler.syncInterval + BACKOFF;
+
+ _("Test setting sync interval when nextSync == 0");
+ scheduler.nextSync = 0;
+ scheduler.scheduleNextSync();
+
+ // nextSync - Date.now() might be smaller than expectedInterval
+ // since some time has passed since we called scheduleNextSync().
+ Assert.ok(scheduler.nextSync - Date.now() <= Status.backoffInterval);
+ Assert.equal(scheduler.syncTimer.delay, Status.backoffInterval);
+
+ _("Test setting sync interval when nextSync != 0");
+ scheduler.nextSync = Date.now() + scheduler.singleDeviceInterval;
+ scheduler.scheduleNextSync();
+
+ // nextSync - Date.now() might be smaller than expectedInterval
+ // since some time has passed since we called scheduleNextSync().
+ Assert.ok(scheduler.nextSync - Date.now() <= Status.backoffInterval);
+ Assert.ok(scheduler.syncTimer.delay <= Status.backoffInterval);
+
+ // Request a sync at a longer interval. The sync that's already scheduled
+ // for sooner takes precedence.
+ let nextSync = scheduler.nextSync;
+ let timerDelay = scheduler.syncTimer.delay;
+ let requestedInterval = scheduler.syncInterval * 10;
+ Assert.ok(requestedInterval > Status.backoffInterval);
+ scheduler.scheduleNextSync(requestedInterval);
+ Assert.equal(scheduler.nextSync, nextSync);
+ Assert.equal(scheduler.syncTimer.delay, timerDelay);
+
+ // We can schedule anything we want if there isn't a sync scheduled.
+ scheduler.nextSync = 0;
+ scheduler.scheduleNextSync(requestedInterval);
+ Assert.ok(scheduler.nextSync <= Date.now() + requestedInterval);
+ Assert.equal(scheduler.syncTimer.delay, requestedInterval);
+
+ // Request a sync at the smallest possible interval (0 triggers now).
+ scheduler.scheduleNextSync(1);
+ Assert.ok(scheduler.nextSync <= Date.now() + Status.backoffInterval);
+ Assert.equal(scheduler.syncTimer.delay, Status.backoffInterval);
+
+ await cleanUpAndGo();
+});
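+
+// The two tasks above pin down scheduleNextSync()'s rule, roughly (a
+// sketch, not the actual implementation):
+//   let delay = Math.max(interval ?? scheduler.syncInterval, Status.backoffInterval);
+//   if (!scheduler.nextSync || Date.now() + delay < scheduler.nextSync) {
+//     scheduler.nextSync = Date.now() + delay; // else the sooner sync wins
+//   }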
+
+add_task(async function test_handleSyncError() {
+ enableValidationPrefs();
+
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ // Force sync to fail.
+ Svc.PrefBranch.setStringPref("firstSync", "notReady");
+
+ _("Ensure expected initial environment.");
+ Assert.equal(scheduler._syncErrors, 0);
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+ Assert.equal(Status.backoffInterval, 0);
+
+ // Trigger sync with an error several times & observe
+ // functionality of handleSyncError()
+ _("Test first error calls scheduleNextSync on default interval");
+ await Service.sync();
+ Assert.ok(scheduler.nextSync <= Date.now() + scheduler.singleDeviceInterval);
+ Assert.equal(scheduler.syncTimer.delay, scheduler.singleDeviceInterval);
+ Assert.equal(scheduler._syncErrors, 1);
+ Assert.ok(!Status.enforceBackoff);
+ scheduler.syncTimer.clear();
+
+ _("Test second error still calls scheduleNextSync on default interval");
+ await Service.sync();
+ Assert.ok(scheduler.nextSync <= Date.now() + scheduler.singleDeviceInterval);
+ Assert.equal(scheduler.syncTimer.delay, scheduler.singleDeviceInterval);
+ Assert.equal(scheduler._syncErrors, 2);
+ Assert.ok(!Status.enforceBackoff);
+ scheduler.syncTimer.clear();
+
+ _("Test third error sets Status.enforceBackoff and calls scheduleAtInterval");
+ await Service.sync();
+ let maxInterval = scheduler._syncErrors * (2 * MINIMUM_BACKOFF_INTERVAL);
+ Assert.equal(Status.backoffInterval, 0);
+ Assert.ok(scheduler.nextSync <= Date.now() + maxInterval);
+ Assert.ok(scheduler.syncTimer.delay <= maxInterval);
+ Assert.equal(scheduler._syncErrors, 3);
+ Assert.ok(Status.enforceBackoff);
+
+ // Status.enforceBackoff is false but there are still errors.
+ Status.resetBackoff();
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(scheduler._syncErrors, 3);
+ scheduler.syncTimer.clear();
+
+ _(
+ "Test fourth error still calls scheduleAtInterval even if enforceBackoff was reset"
+ );
+ await Service.sync();
+ maxInterval = scheduler._syncErrors * (2 * MINIMUM_BACKOFF_INTERVAL);
+ Assert.ok(scheduler.nextSync <= Date.now() + maxInterval);
+ Assert.ok(scheduler.syncTimer.delay <= maxInterval);
+ Assert.equal(scheduler._syncErrors, 4);
+ Assert.ok(Status.enforceBackoff);
+ scheduler.syncTimer.clear();
+
+ _("Arrange for a successful sync to reset the scheduler error count");
+ let promiseObserved = promiseOneObserver("weave:service:sync:finish");
+ Svc.PrefBranch.setStringPref("firstSync", "wipeRemote");
+ scheduler.scheduleNextSync(-1);
+ await promiseObserved;
+ await cleanUpAndGo(server);
+});
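+
+// In short: handleSyncError keeps the normal interval for the first two
+// failures, then flips Status.enforceBackoff and schedules at a
+// backed-off interval of at most _syncErrors * 2 * MINIMUM_BACKOFF_INTERVAL.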
+
+add_task(async function test_client_sync_finish_updateClientMode() {
+ enableValidationPrefs();
+
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ // Confirm defaults.
+ Assert.equal(scheduler.syncThreshold, SINGLE_USER_THRESHOLD);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+ Assert.ok(!scheduler.idle);
+
+ // Trigger a change in interval & threshold by adding a client.
+ await clientsEngine._store.create({
+ id: "foo",
+ cleartext: { os: "mobile", version: "0.01", type: "desktop" },
+ });
+ Assert.equal(false, scheduler.numClients > 1);
+ scheduler.updateClientMode();
+ await Service.sync();
+
+ Assert.equal(scheduler.syncThreshold, MULTI_DEVICE_THRESHOLD);
+ Assert.equal(scheduler.syncInterval, scheduler.activeInterval);
+ Assert.ok(scheduler.numClients > 1);
+ Assert.ok(!scheduler.idle);
+
+  // Reset the clients engine so we drop back to a single client.
+ await clientsEngine.resetClient();
+ // Also re-init the server, or we suck our "foo" client back down.
+ await setUp(server);
+
+ await Service.sync();
+
+ // Goes back to single user if # clients is 1.
+ Assert.equal(scheduler.numClients, 1);
+ Assert.equal(scheduler.syncThreshold, SINGLE_USER_THRESHOLD);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+ Assert.equal(false, scheduler.numClients > 1);
+ Assert.ok(!scheduler.idle);
+
+ await cleanUpAndGo(server);
+});
+
+add_task(async function test_autoconnect_nextSync_past() {
+ enableValidationPrefs();
+
+ let promiseObserved = promiseOneObserver("weave:service:sync:finish");
+ // nextSync will be 0 by default, so it's way in the past.
+
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ scheduler.autoConnect();
+ await promiseObserved;
+ await cleanUpAndGo(server);
+});
+
+add_task(async function test_autoconnect_nextSync_future() {
+ enableValidationPrefs();
+
+ let previousSync = Date.now() + scheduler.syncInterval / 2;
+ scheduler.nextSync = previousSync;
+  // nextSync rounds down to the nearest second.
+ let expectedSync = scheduler.nextSync;
+ let expectedInterval = expectedSync - Date.now() - 1000;
+
+ // Ensure we don't actually try to sync (or log in for that matter).
+ function onLoginStart() {
+ do_throw("Should not get here!");
+ }
+ Svc.Obs.add("weave:service:login:start", onLoginStart);
+
+ await configureIdentity({ username: "johndoe@mozilla.com" });
+ scheduler.autoConnect();
+ await promiseZeroTimer();
+
+ Assert.equal(scheduler.nextSync, expectedSync);
+ Assert.ok(scheduler.syncTimer.delay >= expectedInterval);
+
+ Svc.Obs.remove("weave:service:login:start", onLoginStart);
+ await cleanUpAndGo();
+});
+
+add_task(async function test_autoconnect_mp_locked() {
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ // Pretend user did not unlock master password.
+ let origLocked = Utils.mpLocked;
+ Utils.mpLocked = () => true;
+
+ let origEnsureMPUnlocked = Utils.ensureMPUnlocked;
+ Utils.ensureMPUnlocked = () => {
+ _("Faking Master Password entry cancelation.");
+ return false;
+ };
+ let origFxA = Service.identity._fxaService;
+ Service.identity._fxaService = new FxAccounts({
+ currentAccountState: {
+ getUserAccountData(...args) {
+ return origFxA._internal.currentAccountState.getUserAccountData(
+ ...args
+ );
+ },
+ },
+ keys: {
+ canGetKeyForScope() {
+ return false;
+ },
+ },
+ });
+ // A locked master password will still trigger a sync, but then we'll hit
+ // MASTER_PASSWORD_LOCKED and hence MASTER_PASSWORD_LOCKED_RETRY_INTERVAL.
+ let promiseObserved = promiseOneObserver("weave:service:login:error");
+
+ scheduler.autoConnect();
+ await promiseObserved;
+
+ await Async.promiseYield();
+
+ Assert.equal(Status.login, MASTER_PASSWORD_LOCKED);
+
+ Utils.mpLocked = origLocked;
+ Utils.ensureMPUnlocked = origEnsureMPUnlocked;
+ Service.identity._fxaService = origFxA;
+
+ await cleanUpAndGo(server);
+});
+
+add_task(async function test_no_autoconnect_during_wizard() {
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ // Simulate the Sync setup wizard.
+ Svc.PrefBranch.setStringPref("firstSync", "notReady");
+
+ // Ensure we don't actually try to sync (or log in for that matter).
+ function onLoginStart() {
+ do_throw("Should not get here!");
+ }
+ Svc.Obs.add("weave:service:login:start", onLoginStart);
+
+ scheduler.autoConnect(0);
+ await promiseZeroTimer();
+ Svc.Obs.remove("weave:service:login:start", onLoginStart);
+ await cleanUpAndGo(server);
+});
+
+add_task(async function test_no_autoconnect_status_not_ok() {
+ let server = await sync_httpd_setup();
+ Status.__authManager = Service.identity = new SyncAuthManager();
+
+ // Ensure we don't actually try to sync (or log in for that matter).
+ function onLoginStart() {
+ do_throw("Should not get here!");
+ }
+ Svc.Obs.add("weave:service:login:start", onLoginStart);
+
+ scheduler.autoConnect();
+ await promiseZeroTimer();
+ Svc.Obs.remove("weave:service:login:start", onLoginStart);
+
+ Assert.equal(Status.service, CLIENT_NOT_CONFIGURED);
+ Assert.equal(Status.login, LOGIN_FAILED_NO_USERNAME);
+
+ await cleanUpAndGo(server);
+});
+
+add_task(async function test_idle_adjustSyncInterval() {
+ // Confirm defaults.
+ Assert.equal(scheduler.idle, false);
+
+ // Single device: nothing changes.
+ scheduler.observe(
+ null,
+ "idle",
+ Svc.PrefBranch.getIntPref("scheduler.idleTime")
+ );
+ Assert.equal(scheduler.idle, true);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+
+ // Multiple devices: switch to idle interval.
+ scheduler.idle = false;
+ Svc.PrefBranch.setIntPref("clients.devices.desktop", 1);
+ Svc.PrefBranch.setIntPref("clients.devices.mobile", 1);
+ scheduler.updateClientMode();
+ scheduler.observe(
+ null,
+ "idle",
+ Svc.PrefBranch.getIntPref("scheduler.idleTime")
+ );
+ Assert.equal(scheduler.idle, true);
+ Assert.equal(scheduler.syncInterval, scheduler.idleInterval);
+
+ await cleanUpAndGo();
+});
+
+add_task(async function test_back_triggersSync() {
+ // Confirm defaults.
+ Assert.ok(!scheduler.idle);
+ Assert.equal(Status.backoffInterval, 0);
+
+ // Set up: Define 2 clients and put the system in idle.
+ Svc.PrefBranch.setIntPref("clients.devices.desktop", 1);
+ Svc.PrefBranch.setIntPref("clients.devices.mobile", 1);
+ scheduler.observe(
+ null,
+ "idle",
+ Svc.PrefBranch.getIntPref("scheduler.idleTime")
+ );
+ Assert.ok(scheduler.idle);
+
+ // We don't actually expect the sync (or the login, for that matter) to
+ // succeed. We just want to ensure that it was attempted.
+ let promiseObserved = promiseOneObserver("weave:service:login:error");
+
+ // Send an 'active' event to trigger sync soonish.
+ scheduler.observe(
+ null,
+ "active",
+ Svc.PrefBranch.getIntPref("scheduler.idleTime")
+ );
+ await promiseObserved;
+ await cleanUpAndGo();
+});
+
+add_task(async function test_active_triggersSync_observesBackoff() {
+ // Confirm defaults.
+ Assert.ok(!scheduler.idle);
+
+ // Set up: Set backoff, define 2 clients and put the system in idle.
+ const BACKOFF = 7337;
+ Status.backoffInterval = scheduler.idleInterval + BACKOFF;
+ Svc.PrefBranch.setIntPref("clients.devices.desktop", 1);
+ Svc.PrefBranch.setIntPref("clients.devices.mobile", 1);
+ scheduler.observe(
+ null,
+ "idle",
+ Svc.PrefBranch.getIntPref("scheduler.idleTime")
+ );
+ Assert.equal(scheduler.idle, true);
+
+ function onLoginStart() {
+ do_throw("Shouldn't have kicked off a sync!");
+ }
+ Svc.Obs.add("weave:service:login:start", onLoginStart);
+
+ let promiseTimer = promiseNamedTimer(
+ IDLE_OBSERVER_BACK_DELAY * 1.5,
+ {},
+ "timer"
+ );
+
+ // Send an 'active' event to try to trigger sync soonish.
+ scheduler.observe(
+ null,
+ "active",
+ Svc.PrefBranch.getIntPref("scheduler.idleTime")
+ );
+ await promiseTimer;
+ Svc.Obs.remove("weave:service:login:start", onLoginStart);
+
+ Assert.ok(scheduler.nextSync <= Date.now() + Status.backoffInterval);
+ Assert.equal(scheduler.syncTimer.delay, Status.backoffInterval);
+
+ await cleanUpAndGo();
+});
+
+add_task(async function test_back_debouncing() {
+ _(
+ "Ensure spurious back-then-idle events, as observed on OS X, don't trigger a sync."
+ );
+
+ // Confirm defaults.
+ Assert.equal(scheduler.idle, false);
+
+ // Set up: Define 2 clients and put the system in idle.
+ Svc.PrefBranch.setIntPref("clients.devices.desktop", 1);
+ Svc.PrefBranch.setIntPref("clients.devices.mobile", 1);
+ scheduler.observe(
+ null,
+ "idle",
+ Svc.PrefBranch.getIntPref("scheduler.idleTime")
+ );
+ Assert.equal(scheduler.idle, true);
+
+ function onLoginStart() {
+ do_throw("Shouldn't have kicked off a sync!");
+ }
+ Svc.Obs.add("weave:service:login:start", onLoginStart);
+
+ // Create spurious back-then-idle events as observed on OS X:
+ scheduler.observe(
+ null,
+ "active",
+ Svc.PrefBranch.getIntPref("scheduler.idleTime")
+ );
+ scheduler.observe(
+ null,
+ "idle",
+ Svc.PrefBranch.getIntPref("scheduler.idleTime")
+ );
+
+ await promiseNamedTimer(IDLE_OBSERVER_BACK_DELAY * 1.5, {}, "timer");
+ Svc.Obs.remove("weave:service:login:start", onLoginStart);
+ await cleanUpAndGo();
+});
+
+add_task(async function test_no_sync_node() {
+ enableValidationPrefs();
+
+  // Test that Status.sync == NO_SYNC_NODE_FOUND is not overwritten on
+  // sync:finish.
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ let oldfc = Service.identity._findCluster;
+ Service.identity._findCluster = () => null;
+ Service.clusterURL = "";
+ try {
+ await Service.sync();
+ Assert.equal(Status.sync, NO_SYNC_NODE_FOUND);
+ Assert.equal(scheduler.syncTimer.delay, NO_SYNC_NODE_INTERVAL);
+
+ await cleanUpAndGo(server);
+ } finally {
+ Service.identity._findCluster = oldfc;
+ }
+});
+
+add_task(async function test_sync_failed_partial_500s() {
+ enableValidationPrefs();
+
+ _("Test a 5xx status calls handleSyncError.");
+ scheduler._syncErrors = MAX_ERROR_COUNT_BEFORE_BACKOFF;
+ let server = await sync_httpd_setup();
+
+ let engine = Service.engineManager.get("catapult");
+ engine.enabled = true;
+ engine.exception = { status: 500 };
+
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+
+ Assert.ok(await setUp(server));
+
+ await Service.sync();
+
+ Assert.equal(Status.service, SYNC_FAILED_PARTIAL);
+
+ let maxInterval = scheduler._syncErrors * (2 * MINIMUM_BACKOFF_INTERVAL);
+ Assert.equal(Status.backoffInterval, 0);
+ Assert.ok(Status.enforceBackoff);
+ Assert.equal(scheduler._syncErrors, 4);
+ Assert.ok(scheduler.nextSync <= Date.now() + maxInterval);
+ Assert.ok(scheduler.syncTimer.delay <= maxInterval);
+
+ await cleanUpAndGo(server);
+});
+
+add_task(async function test_sync_failed_partial_noresync() {
+ enableValidationPrefs();
+ let server = await sync_httpd_setup();
+
+ let engine = Service.engineManager.get("catapult");
+ engine.enabled = true;
+ engine.exception = "Bad news";
+ engine._tracker._score = MULTI_DEVICE_THRESHOLD + 1;
+
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+
+ Assert.ok(await setUp(server));
+
+ let resyncDoneObserver = promiseOneObserver("weave:service:resyncs-finished");
+
+ await Service.sync();
+
+ Assert.equal(Status.service, SYNC_FAILED_PARTIAL);
+
+ function onSyncStarted() {
+ do_throw("Should not start resync when previous sync failed");
+ }
+
+ Svc.Obs.add("weave:service:sync:start", onSyncStarted);
+ await resyncDoneObserver;
+
+ Svc.Obs.remove("weave:service:sync:start", onSyncStarted);
+  engine._tracker._score = 0;
+ await cleanUpAndGo(server);
+});
+
+add_task(async function test_sync_failed_partial_400s() {
+ enableValidationPrefs();
+
+ _("Test a non-5xx status doesn't call handleSyncError.");
+ scheduler._syncErrors = MAX_ERROR_COUNT_BEFORE_BACKOFF;
+ let server = await sync_httpd_setup();
+
+ let engine = Service.engineManager.get("catapult");
+ engine.enabled = true;
+ engine.exception = { status: 400 };
+
+ // Have multiple devices for an active interval.
+ await clientsEngine._store.create({
+ id: "foo",
+ cleartext: { os: "mobile", version: "0.01", type: "desktop" },
+ });
+
+ Assert.equal(Status.sync, SYNC_SUCCEEDED);
+
+ Assert.ok(await setUp(server));
+
+ await Service.sync();
+
+ Assert.equal(Status.service, SYNC_FAILED_PARTIAL);
+ Assert.equal(scheduler.syncInterval, scheduler.activeInterval);
+
+ Assert.equal(Status.backoffInterval, 0);
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(scheduler._syncErrors, 0);
+ Assert.ok(scheduler.nextSync <= Date.now() + scheduler.activeInterval);
+ Assert.ok(scheduler.syncTimer.delay <= scheduler.activeInterval);
+
+ await cleanUpAndGo(server);
+});
+
+add_task(async function test_sync_X_Weave_Backoff() {
+ enableValidationPrefs();
+
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ // Use an odd value on purpose so that it doesn't happen to coincide with one
+ // of the sync intervals.
+ const BACKOFF = 7337;
+
+ // Extend info/collections so that we can put it into server maintenance mode.
+ const INFO_COLLECTIONS = "/1.1/johndoe@mozilla.com/info/collections";
+ let infoColl = server._handler._overridePaths[INFO_COLLECTIONS];
+ let serverBackoff = false;
+ function infoCollWithBackoff(request, response) {
+ if (serverBackoff) {
+ response.setHeader("X-Weave-Backoff", "" + BACKOFF);
+ }
+ infoColl(request, response);
+ }
+ server.registerPathHandler(INFO_COLLECTIONS, infoCollWithBackoff);
+
+ // Pretend we have two clients so that the regular sync interval is
+ // sufficiently low.
+ await clientsEngine._store.create({
+ id: "foo",
+ cleartext: { os: "mobile", version: "0.01", type: "desktop" },
+ });
+ let rec = await clientsEngine._store.createRecord("foo", "clients");
+ await rec.encrypt(Service.collectionKeys.keyForCollection("clients"));
+ await rec.upload(Service.resource(clientsEngine.engineURL + rec.id));
+
+ // Sync once to log in and get everything set up. Let's verify our initial
+ // values.
+ await Service.sync();
+ Assert.equal(Status.backoffInterval, 0);
+ Assert.equal(Status.minimumNextSync, 0);
+ Assert.equal(scheduler.syncInterval, scheduler.activeInterval);
+ Assert.ok(scheduler.nextSync <= Date.now() + scheduler.syncInterval);
+ // Sanity check that we picked the right value for BACKOFF:
+ Assert.ok(scheduler.syncInterval < BACKOFF * 1000);
+
+ // Turn on server maintenance and sync again.
+ serverBackoff = true;
+ await Service.sync();
+
+ Assert.ok(Status.backoffInterval >= BACKOFF * 1000);
+  // Allow 20 seconds' worth of leeway between when Status.minimumNextSync
+  // was set and when this line gets executed.
+ let minimumExpectedDelay = (BACKOFF - 20) * 1000;
+ Assert.ok(Status.minimumNextSync >= Date.now() + minimumExpectedDelay);
+
+ // Verify that the next sync is actually going to wait that long.
+ Assert.ok(scheduler.nextSync >= Date.now() + minimumExpectedDelay);
+ Assert.ok(scheduler.syncTimer.delay >= minimumExpectedDelay);
+
+ await cleanUpAndGo(server);
+});
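+
+// X-Weave-Backoff is specified in seconds while Status.backoffInterval
+// is stored in milliseconds, which is why the assertions above compare
+// against BACKOFF * 1000; roughly (pseudocode):
+//   Status.backoffInterval =
+//     parseInt(response.getHeader("X-Weave-Backoff"), 10) * 1000;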
+
+add_task(async function test_sync_503_Retry_After() {
+ enableValidationPrefs();
+
+ let server = await sync_httpd_setup();
+ await setUp(server);
+
+ // Use an odd value on purpose so that it doesn't happen to coincide with one
+ // of the sync intervals.
+ const BACKOFF = 7337;
+
+ // Extend info/collections so that we can put it into server maintenance mode.
+ const INFO_COLLECTIONS = "/1.1/johndoe@mozilla.com/info/collections";
+ let infoColl = server._handler._overridePaths[INFO_COLLECTIONS];
+ let serverMaintenance = false;
+ function infoCollWithMaintenance(request, response) {
+ if (!serverMaintenance) {
+ infoColl(request, response);
+ return;
+ }
+ response.setHeader("Retry-After", "" + BACKOFF);
+ response.setStatusLine(request.httpVersion, 503, "Service Unavailable");
+ }
+ server.registerPathHandler(INFO_COLLECTIONS, infoCollWithMaintenance);
+
+ // Pretend we have two clients so that the regular sync interval is
+ // sufficiently low.
+ await clientsEngine._store.create({
+ id: "foo",
+ cleartext: { os: "mobile", version: "0.01", type: "desktop" },
+ });
+ let rec = await clientsEngine._store.createRecord("foo", "clients");
+ await rec.encrypt(Service.collectionKeys.keyForCollection("clients"));
+ await rec.upload(Service.resource(clientsEngine.engineURL + rec.id));
+
+ // Sync once to log in and get everything set up. Let's verify our initial
+ // values.
+ await Service.sync();
+ Assert.ok(!Status.enforceBackoff);
+ Assert.equal(Status.backoffInterval, 0);
+ Assert.equal(Status.minimumNextSync, 0);
+ Assert.equal(scheduler.syncInterval, scheduler.activeInterval);
+ Assert.ok(scheduler.nextSync <= Date.now() + scheduler.syncInterval);
+ // Sanity check that we picked the right value for BACKOFF:
+ Assert.ok(scheduler.syncInterval < BACKOFF * 1000);
+
+ // Turn on server maintenance and sync again.
+ serverMaintenance = true;
+ await Service.sync();
+
+ Assert.ok(Status.enforceBackoff);
+ Assert.ok(Status.backoffInterval >= BACKOFF * 1000);
+  // Allow 3 seconds' worth of leeway between when Status.minimumNextSync
+  // was set and when this line gets executed.
+ let minimumExpectedDelay = (BACKOFF - 3) * 1000;
+ Assert.ok(Status.minimumNextSync >= Date.now() + minimumExpectedDelay);
+
+ // Verify that the next sync is actually going to wait that long.
+ Assert.ok(scheduler.nextSync >= Date.now() + minimumExpectedDelay);
+ Assert.ok(scheduler.syncTimer.delay >= minimumExpectedDelay);
+
+ await cleanUpAndGo(server);
+});
+
+add_task(async function test_loginError_recoverable_reschedules() {
+ _("Verify that a recoverable login error schedules a new sync.");
+ await configureIdentity({ username: "johndoe@mozilla.com" });
+ Service.clusterURL = "http://localhost:1234/";
+ Status.resetSync(); // reset Status.login
+
+ let promiseObserved = promiseOneObserver("weave:service:login:error");
+
+ // Let's set it up so that a sync is overdue, both in terms of previously
+ // scheduled syncs and the global score. We still do not expect an immediate
+ // sync because we just tried (duh).
+ scheduler.nextSync = Date.now() - 100000;
+ scheduler.globalScore = SINGLE_USER_THRESHOLD + 1;
+ function onSyncStart() {
+ do_throw("Shouldn't have started a sync!");
+ }
+ Svc.Obs.add("weave:service:sync:start", onSyncStart);
+
+ // Sanity check.
+ Assert.equal(scheduler.syncTimer, null);
+ Assert.equal(Status.checkSetup(), STATUS_OK);
+ Assert.equal(Status.login, LOGIN_SUCCEEDED);
+
+ scheduler.scheduleNextSync(0);
+ await promiseObserved;
+ await Async.promiseYield();
+
+ Assert.equal(Status.login, LOGIN_FAILED_NETWORK_ERROR);
+
+ let expectedNextSync = Date.now() + scheduler.syncInterval;
+ Assert.ok(scheduler.nextSync > Date.now());
+ Assert.ok(scheduler.nextSync <= expectedNextSync);
+ Assert.ok(scheduler.syncTimer.delay > 0);
+ Assert.ok(scheduler.syncTimer.delay <= scheduler.syncInterval);
+
+ Svc.Obs.remove("weave:service:sync:start", onSyncStart);
+ await cleanUpAndGo();
+});
+
+add_task(async function test_loginError_fatal_clearsTriggers() {
+ _("Verify that a fatal login error clears sync triggers.");
+ await configureIdentity({ username: "johndoe@mozilla.com" });
+
+ let server = httpd_setup({
+ "/1.1/johndoe@mozilla.com/info/collections": httpd_handler(
+ 401,
+ "Unauthorized"
+ ),
+ });
+
+ Service.clusterURL = server.baseURI + "/";
+ Status.resetSync(); // reset Status.login
+
+ let promiseObserved = promiseOneObserver("weave:service:login:error");
+
+ // Sanity check.
+ Assert.equal(scheduler.nextSync, 0);
+ Assert.equal(scheduler.syncTimer, null);
+ Assert.equal(Status.checkSetup(), STATUS_OK);
+ Assert.equal(Status.login, LOGIN_SUCCEEDED);
+
+ scheduler.scheduleNextSync(0);
+ await promiseObserved;
+ await Async.promiseYield();
+
+ // For the FxA identity, a 401 on info/collections means a transient
+ // error, probably due to an inability to fetch a token.
+ Assert.equal(Status.login, LOGIN_FAILED_NETWORK_ERROR);
+ // syncs should still be scheduled.
+ Assert.ok(scheduler.nextSync > Date.now());
+ Assert.ok(scheduler.syncTimer.delay > 0);
+
+ await cleanUpAndGo(server);
+});
+
+add_task(async function test_proper_interval_on_only_failing() {
+ _("Ensure proper behavior when only failed records are applied.");
+
+ // If an engine reports that no records succeeded, we shouldn't decrease the
+ // sync interval.
+ Assert.ok(!scheduler.hasIncomingItems);
+ const INTERVAL = 10000000;
+ scheduler.syncInterval = INTERVAL;
+
+ Svc.Obs.notify("weave:service:sync:applied", {
+ applied: 2,
+ succeeded: 0,
+ failed: 2,
+ newFailed: 2,
+ reconciled: 0,
+ });
+
+ await Async.promiseYield();
+ scheduler.adjustSyncInterval();
+ Assert.ok(!scheduler.hasIncomingItems);
+ Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
+});
+
+add_task(async function test_link_status_change() {
+ _("Check that we only attempt to sync when link status is up");
+ try {
+ sinon.spy(scheduler, "scheduleNextSync");
+
+ Svc.Obs.notify("network:link-status-changed", null, "down");
+ equal(scheduler.scheduleNextSync.callCount, 0);
+
+ Svc.Obs.notify("network:link-status-changed", null, "change");
+ equal(scheduler.scheduleNextSync.callCount, 0);
+
+ Svc.Obs.notify("network:link-status-changed", null, "up");
+ equal(scheduler.scheduleNextSync.callCount, 1);
+
+ Svc.Obs.notify("network:link-status-changed", null, "change");
+ equal(scheduler.scheduleNextSync.callCount, 1);
+ } finally {
+ scheduler.scheduleNextSync.restore();
+ }
+});
diff --git a/services/sync/tests/unit/test_tab_engine.js b/services/sync/tests/unit/test_tab_engine.js
new file mode 100644
index 0000000000..5b1e61871e
--- /dev/null
+++ b/services/sync/tests/unit/test_tab_engine.js
@@ -0,0 +1,226 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { TabProvider } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/tabs.sys.mjs"
+);
+const { WBORecord } = ChromeUtils.importESModule(
+ "resource://services-sync/record.sys.mjs"
+);
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+let engine;
+// We'll need the clients engine for testing as tabs is closely related
+let clientsEngine;
+
+async function syncClientsEngine(server) {
+ clientsEngine._lastFxADevicesFetch = 0;
+ clientsEngine.lastModified = server.getCollection("foo", "clients").timestamp;
+ await clientsEngine._sync();
+}
+
+async function makeRemoteClients() {
+ let server = await serverForFoo(clientsEngine);
+ await configureIdentity({ username: "foo" }, server);
+ await Service.login();
+
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ let remoteId = Utils.makeGUID();
+ let remoteId2 = Utils.makeGUID();
+ let collection = server.getCollection("foo", "clients");
+
+ _("Create remote client records");
+ collection.insertRecord({
+ id: remoteId,
+ name: "Remote client",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ fxaDeviceId: remoteId,
+ fxaDeviceName: "Fxa - Remote client",
+ protocols: ["1.5"],
+ });
+
+ collection.insertRecord({
+ id: remoteId2,
+ name: "Remote client 2",
+ type: "desktop",
+ commands: [],
+ version: "48",
+ fxaDeviceId: remoteId2,
+ fxaDeviceName: "Fxa - Remote client 2",
+ protocols: ["1.5"],
+ });
+
+ let fxAccounts = clientsEngine.fxAccounts;
+ clientsEngine.fxAccounts = {
+ notifyDevices() {
+ return Promise.resolve(true);
+ },
+ device: {
+ getLocalId() {
+ return fxAccounts.device.getLocalId();
+ },
+ getLocalName() {
+ return fxAccounts.device.getLocalName();
+ },
+ getLocalType() {
+ return fxAccounts.device.getLocalType();
+ },
+ recentDeviceList: [{ id: remoteId, name: "remote device" }],
+ refreshDeviceList() {
+ return Promise.resolve(true);
+ },
+ },
+ _internal: {
+ now() {
+ return Date.now();
+ },
+ },
+ };
+
+ await syncClientsEngine(server);
+}
+
+add_task(async function setup() {
+ clientsEngine = Service.clientsEngine;
+ // Make some clients to test with
+ await makeRemoteClients();
+
+ // Make the tabs engine for all the tests to use
+ engine = Service.engineManager.get("tabs");
+ await engine.initialize();
+
+ // Since these are xpcshell tests, we'll need to mock this
+ TabProvider.shouldSkipWindow = mockShouldSkipWindow;
+});
+
+add_task(async function test_tab_engine_skips_incoming_local_record() {
+ _("Ensure incoming records that match local client ID are never applied.");
+
+ let localID = clientsEngine.localID;
+ let collection = new ServerCollection();
+
+ _("Creating remote tab record with local client ID");
+ let localRecord = encryptPayload({
+ id: localID,
+ clientName: "local",
+ tabs: [
+ {
+ title: "title",
+ urlHistory: ["http://foo.com/"],
+ icon: "",
+ lastUsed: 2000,
+ },
+ ],
+ });
+ collection.insert(localID, localRecord);
+
+ _("Creating remote tab record with a different client ID");
+ let remoteID = "fake-guid-00"; // remote should match one of the test clients
+ let remoteRecord = encryptPayload({
+ id: remoteID,
+ clientName: "not local",
+ tabs: [
+ {
+ title: "title2",
+ urlHistory: ["http://bar.com/"],
+ icon: "",
+ lastUsed: 3000,
+ },
+ ],
+ });
+ collection.insert(remoteID, remoteRecord);
+
+ _("Setting up Sync server");
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/tabs": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = {
+ tabs: { version: engine.version, syncID },
+ };
+
+ await generateNewKeys(Service.collectionKeys);
+
+ let promiseFinished = new Promise(resolve => {
+ let syncFinish = engine._syncFinish;
+ engine._syncFinish = async function () {
+ let remoteTabs = await engine._rustStore.getAll();
+ equal(
+ remoteTabs.length,
+ 1,
+ "Remote client record was applied and local wasn't"
+ );
+ let record = remoteTabs[0];
+ equal(record.clientId, remoteID, "Remote client ID matches");
+
+ _("Ensure getAllClients returns the correct shape");
+ let clients = await engine.getAllClients();
+ equal(clients.length, 1);
+ let client = clients[0];
+ equal(client.id, "fake-guid-00");
+ equal(client.name, "Remote client");
+ equal(client.type, "desktop");
+ Assert.ok(client.lastModified); // lastModified should be filled in once serverModified is populated from the server
+ deepEqual(client.tabs, [
+ {
+ title: "title2",
+ urlHistory: ["http://bar.com/"],
+ icon: "",
+ inactive: false,
+ lastUsed: 3000,
+ },
+ ]);
+ await syncFinish.call(engine);
+ resolve();
+ };
+ });
+
+ _("Start sync");
+ Service.scheduler.hasIncomingItems = false;
+ await engine._sync();
+ await promiseFinished;
+ // Bug 1800185 - we don't want the sync scheduler to see these records as incoming.
+ Assert.ok(!Service.scheduler.hasIncomingItems);
+});
+
+// Ensure we trim tabs when we would otherwise exceed the maximum allowed payload size
+add_task(async function test_too_many_tabs() {
+ let a_lot_of_tabs = [];
+
+ for (let i = 0; i < 4000; ++i) {
+ a_lot_of_tabs.push(
+ `http://example${i}.com/some-super-long-url-chain-to-help-with-bytes`
+ );
+ }
+
+ TabProvider.getWindowEnumerator = mockGetWindowEnumerator.bind(
+ this,
+ a_lot_of_tabs
+ );
+
+ let encoder = Utils.utf8Encoder;
+  // See tryFitItems() in util.js.
+ const computeSerializedSize = records =>
+ encoder.encode(JSON.stringify(records)).byteLength;
+
+ const maxPayloadSize = Service.getMaxRecordPayloadSize();
+ const maxSerializedSize = (maxPayloadSize / 4) * 3 - 1500;
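+  // Why 3/4: encrypted payloads travel base64-encoded, which inflates
+  // their size by 4/3, so the cleartext budget is roughly three quarters
+  // of maxPayloadSize; the 1500 bytes are headroom for record overhead.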
+ // We are over max payload size
+ Assert.ok(computeSerializedSize(a_lot_of_tabs) > maxSerializedSize);
+ let tabs = await engine.getTabsWithinPayloadSize();
+ // We are now under max payload size
+ Assert.ok(computeSerializedSize(tabs) < maxSerializedSize);
+});
diff --git a/services/sync/tests/unit/test_tab_provider.js b/services/sync/tests/unit/test_tab_provider.js
new file mode 100644
index 0000000000..bbf68dea33
--- /dev/null
+++ b/services/sync/tests/unit/test_tab_provider.js
@@ -0,0 +1,64 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { TabProvider } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/tabs.sys.mjs"
+);
+
+add_task(async function test_getAllTabs() {
+ let provider = TabProvider;
+ provider.shouldSkipWindow = mockShouldSkipWindow;
+
+ let tabs;
+
+ provider.getWindowEnumerator = mockGetWindowEnumerator.bind(this, [
+ "http://bar.com",
+ ]);
+
+ _("Get all tabs.");
+ tabs = await provider.getAllTabsWithEstimatedMax(
+ false,
+ Number.MAX_SAFE_INTEGER
+ );
+ _("Tabs: " + JSON.stringify(tabs));
+ equal(tabs.length, 1);
+ equal(tabs[0].title, "title");
+ equal(tabs[0].urlHistory.length, 1);
+ equal(tabs[0].urlHistory[0], "http://bar.com/");
+ equal(tabs[0].icon, "");
+ equal(tabs[0].lastUsed, 2); // the window enumerator reports ms, but getAllTabsWithEstimatedMax() returns seconds
+
+ _("Get all tabs, and check that filtering works.");
+ provider.getWindowEnumerator = mockGetWindowEnumerator.bind(this, [
+ "http://foo.com",
+ "about:foo",
+ ]);
+ tabs = await provider.getAllTabsWithEstimatedMax(
+ true,
+ Number.MAX_SAFE_INTEGER
+ );
+ _("Filtered: " + JSON.stringify(tabs));
+ equal(tabs.length, 1);
+
+ _("Get all tabs, and check that they are properly sorted");
+ provider.getWindowEnumerator = mockGetWindowEnumerator.bind(this, [
+ "http://foo.com",
+ "http://bar.com",
+ ]);
+ tabs = await provider.getAllTabsWithEstimatedMax(
+ true,
+ Number.MAX_SAFE_INTEGER
+ );
+ _("Ordered: " + JSON.stringify(tabs));
+ equal(tabs[0].lastUsed > tabs[1].lastUsed, true);
+
+ // Reader-mode URLs are unwrapped to the original page URL.
+ provider.getWindowEnumerator = mockGetWindowEnumerator.bind(this, [
+ "about:reader?url=http%3A%2F%2Ffoo.com%2F",
+ ]);
+ tabs = await provider.getAllTabsWithEstimatedMax(
+ true,
+ Number.MAX_SAFE_INTEGER
+ );
+ equal(tabs[0].urlHistory[0], "http://foo.com/");
+});
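+
+// A sketch of the reader-mode unwrapping the last assertion depends on
+// (assumed behavior; the provider's real code may differ): recover the
+// original page URL from the about:reader "url" query parameter.
+function unwrapReaderUrl(tabUrl) {
+ // URLSearchParams.get() already percent-decodes the value.
+ return new URL(tabUrl).searchParams.get("url");
+}
+// unwrapReaderUrl("about:reader?url=http%3A%2F%2Ffoo.com%2F") => "http://foo.com/"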
diff --git a/services/sync/tests/unit/test_tab_quickwrite.js b/services/sync/tests/unit/test_tab_quickwrite.js
new file mode 100644
index 0000000000..2a1c75c8c6
--- /dev/null
+++ b/services/sync/tests/unit/test_tab_quickwrite.js
@@ -0,0 +1,204 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+ChromeUtils.importESModule("resource://services-sync/engines/tabs.sys.mjs");
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+const { TabProvider } = ChromeUtils.importESModule(
+ "resource://services-sync/engines/tabs.sys.mjs"
+);
+
+const FAR_FUTURE = 4102405200000; // 2100/01/01
+
+add_task(async function setup() {
+ // Since these are xpcshell tests, we'll need to mock ui features
+ TabProvider.shouldSkipWindow = mockShouldSkipWindow;
+ TabProvider.getWindowEnumerator = mockGetWindowEnumerator.bind(this, [
+ "http://foo.com",
+ ]);
+});
+
+async function prepareServer() {
+ _("Setting up Sync server");
+ Service.serverConfiguration = {
+ max_post_records: 100,
+ };
+
+ let server = new SyncServer();
+ server.start();
+ await SyncTestingInfrastructure(server, "username");
+ server.registerUser("username");
+
+ let collection = server.createCollection("username", "tabs");
+ await generateNewKeys(Service.collectionKeys);
+
+ let engine = Service.engineManager.get("tabs");
+ await engine.initialize();
+
+ return { server, collection, engine };
+}
+
+async function withPatchedValue(object, name, patchedVal, fn) {
+ _(`patching ${name}=${patchedVal}`);
+ let old = object[name];
+ object[name] = patchedVal;
+ try {
+ await fn();
+ } finally {
+ object[name] = old;
+ }
+}
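+
+// Example usage (mirrors the calls below): patch a value for the duration of
+// one call, restoring the original even if the callback throws.
+// await withPatchedValue(engine, "enabled", false, () => engine.quickWrite());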
+
+add_task(async function test_tab_quickwrite_works() {
+ _("Ensure a simple quickWrite works.");
+ let { server, collection, engine } = await prepareServer();
+ Assert.equal(collection.count(), 0, "starting with 0 tab records");
+ Assert.ok(await engine.quickWrite());
+ // Validate we didn't bork lastSync
+ let lastSync = await engine.getLastSync();
+ Assert.ok(lastSync < FAR_FUTURE);
+ Assert.equal(collection.count(), 1, "tab record was written");
+
+ await promiseStopServer(server);
+});
+
+add_task(async function test_tab_bad_status() {
+ _("Ensure quickWrite silently aborts when we aren't setup correctly.");
+ let { server, engine } = await prepareServer();
+ // Store the original lock to reset it back after this test
+ let lock = engine.lock;
+ // Arrange for this test to fail if it tries to take the lock.
+ engine.lock = function () {
+ throw new Error("this test should abort syncing before locking");
+ };
+ let quickWrite = engine.quickWrite.bind(engine); // bind so `this` survives being passed as a callback.
+
+ await withPatchedValue(engine, "enabled", false, quickWrite);
+ await withPatchedValue(Service, "serverConfiguration", null, quickWrite);
+
+ Services.prefs.clearUserPref("services.sync.username");
+ await quickWrite();
+ // Validate we didn't bork lastSync
+ let lastSync = await engine.getLastSync();
+ Assert.ok(lastSync < FAR_FUTURE);
+ Service.status.resetSync();
+ engine.lock = lock;
+ await promiseStopServer(server);
+});
+
+add_task(async function test_tab_quickwrite_lock() {
+ _("Ensure we fail to quickWrite if the engine is locked.");
+ let { server, collection, engine } = await prepareServer();
+
+ Assert.equal(collection.count(), 0, "starting with 0 tab records");
+ engine.lock();
+ Assert.ok(!(await engine.quickWrite()));
+ Assert.equal(collection.count(), 0, "didn't sync due to being locked");
+ engine.unlock();
+
+ await promiseStopServer(server);
+});
+
+add_task(async function test_tab_quickwrite_keeps_old_tabs() {
+ _("Ensure we don't delete other tabs on quickWrite (bug 1801295).");
+ let { server, engine } = await prepareServer();
+
+ // We need a first sync to ensure everything is set up correctly.
+ await Service.sync({ engines: ["tabs"] });
+
+ const id = "fake-guid-99";
+ let remoteRecord = encryptPayload({
+ id,
+ clientName: "not local",
+ tabs: [
+ {
+ title: "title2",
+ urlHistory: ["http://bar.com/"],
+ icon: "",
+ lastUsed: 3000,
+ },
+ ],
+ });
+
+ let collection = server.getCollection("username", "tabs");
+ collection.insert(id, remoteRecord);
+
+ await Service.sync({ engines: ["tabs"] });
+
+ // The collection should now have 2 records: ours and the pretend remote one we inserted.
+ Assert.equal(collection.count(), 2, "starting with 2 tab records");
+
+ // So fxAccounts.device.recentDeviceList is not null.
+ engine.service.clientsEngine.fxAccounts.device._deviceListCache = {
+ devices: [],
+ };
+ // trick the clients engine into thinking it has a remote client with the same guid.
+ engine.service.clientsEngine._store._remoteClients = {};
+ engine.service.clientsEngine._store._remoteClients[id] = {
+ id,
+ fxaDeviceId: id,
+ };
+
+ let clients = await engine.getAllClients();
+ Assert.equal(clients.length, 1);
+
+ _("Doing a quick-write");
+ Assert.ok(await engine.quickWrite());
+
+ // Should still have our client after a quickWrite.
+ _("Grabbing clients after the quick-write");
+ clients = await engine.getAllClients();
+ Assert.equal(clients.length, 1);
+
+ engine.service.clientsEngine._store._remoteClients = {};
+
+ await promiseStopServer(server);
+});
+
+add_task(async function test_tab_lastSync() {
+ _("Ensure we restore the lastSync timestamp after a quick-write.");
+ let { server, collection, engine } = await prepareServer();
+
+ await engine.initialize();
+ await engine.service.clientsEngine.initialize();
+
+ let origLastSync = engine.lastSync;
+ Assert.ok(await engine.quickWrite());
+ Assert.equal(engine.lastSync, origLastSync);
+ Assert.equal(collection.count(), 1, "successful sync");
+ engine.unlock();
+
+ await promiseStopServer(server);
+});
+
+add_task(async function test_tab_quickWrite_telemetry() {
+ _("Ensure we record the telemetry we expect.");
+ // hook into telemetry
+ let telem = get_sync_test_telemetry();
+ telem.payloads = [];
+ let oldSubmit = telem.submit;
+ let submitPromise = new Promise((resolve, reject) => {
+ telem.submit = function (ping) {
+ telem.submit = oldSubmit;
+ resolve(ping);
+ };
+ });
+
+ let { server, collection, engine } = await prepareServer();
+
+ Assert.equal(collection.count(), 0, "starting with 0 tab records");
+ Assert.ok(await engine.quickWrite());
+ Assert.equal(collection.count(), 1, "tab record was written");
+
+ let ping = await submitPromise;
+ let syncs = ping.syncs;
+ Assert.equal(syncs.length, 1);
+ let sync = syncs[0];
+ Assert.equal(sync.why, "quick-write");
+ Assert.equal(sync.engines.length, 1);
+ Assert.equal(sync.engines[0].name, "tabs");
+
+ await promiseStopServer(server);
+});
diff --git a/services/sync/tests/unit/test_tab_tracker.js b/services/sync/tests/unit/test_tab_tracker.js
new file mode 100644
index 0000000000..8bb71a898a
--- /dev/null
+++ b/services/sync/tests/unit/test_tab_tracker.js
@@ -0,0 +1,371 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+ChromeUtils.importESModule("resource://services-sync/engines/tabs.sys.mjs");
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+const { SyncScheduler } = ChromeUtils.importESModule(
+ "resource://services-sync/policies.sys.mjs"
+);
+
+var scheduler = new SyncScheduler(Service);
+let clientsEngine;
+
+add_task(async function setup() {
+ await Service.promiseInitialized;
+ clientsEngine = Service.clientsEngine;
+
+ scheduler.setDefaults();
+});
+
+function fakeSvcWinMediator() {
+ // actions on windows are captured in logs
+ let logs = [];
+ delete Services.wm;
+
+ function getNext() {
+ let elt = { addTopics: [], remTopics: [], numAPL: 0, numRPL: 0 };
+ logs.push(elt);
+ return {
+ addEventListener(topic) {
+ elt.addTopics.push(topic);
+ },
+ removeEventListener(topic) {
+ elt.remTopics.push(topic);
+ },
+ gBrowser: {
+ addProgressListener() {
+ elt.numAPL++;
+ },
+ removeProgressListener() {
+ elt.numRPL++;
+ },
+ },
+ };
+ }
+
+ Services.wm = {
+ getEnumerator() {
+ return [getNext(), getNext()];
+ },
+ };
+ return logs;
+}
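+
+// Each log entry records, per fake window, which event topics were added and
+// removed and how many progress listeners were attached/detached (numAPL and
+// numRPL), so tests can assert tracker start/stop behavior without real windows.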
+
+function fakeGetTabState(tab) {
+ return tab;
+}
+
+function clearQuickWriteTimer(tracker) {
+ if (tracker.tabsQuickWriteTimer) {
+ tracker.tabsQuickWriteTimer.clear();
+ }
+}
+
+add_task(async function run_test() {
+ let engine = Service.engineManager.get("tabs");
+ await engine.initialize();
+ _("We assume that tabs have changed at startup.");
+ let tracker = engine._tracker;
+ tracker.getTabState = fakeGetTabState;
+
+ Assert.ok(tracker.modified);
+ Assert.ok(
+ Utils.deepEquals(Object.keys(await engine.getChangedIDs()), [
+ clientsEngine.localID,
+ ])
+ );
+
+ let logs;
+
+ _("Test listeners are registered on windows");
+ logs = fakeSvcWinMediator();
+ tracker.start();
+ Assert.equal(logs.length, 2);
+ for (let log of logs) {
+ Assert.equal(log.addTopics.length, 3);
+ Assert.ok(log.addTopics.includes("TabOpen"));
+ Assert.ok(log.addTopics.includes("TabClose"));
+ Assert.ok(log.addTopics.includes("unload"));
+ Assert.equal(log.remTopics.length, 0);
+ Assert.equal(log.numAPL, 1, "Added 1 progress listener");
+ Assert.equal(log.numRPL, 0, "Didn't remove a progress listener");
+ }
+
+ _("Test listeners are unregistered on windows");
+ logs = fakeSvcWinMediator();
+ await tracker.stop();
+ Assert.equal(logs.length, 2);
+ for (let log of logs) {
+ Assert.equal(log.addTopics.length, 0);
+ Assert.equal(log.remTopics.length, 3);
+ Assert.ok(log.remTopics.includes("TabOpen"));
+ Assert.ok(log.remTopics.includes("TabClose"));
+ Assert.ok(log.remTopics.includes("unload"));
+ Assert.equal(log.numAPL, 0, "Didn't add a progress listener");
+ Assert.equal(log.numRPL, 1, "Removed 1 progress listener");
+ }
+
+ _("Test tab listener");
+ for (let evttype of ["TabOpen", "TabClose"]) {
+ // Pretend we just synced.
+ await tracker.clearChangedIDs();
+ Assert.ok(!tracker.modified);
+
+ // Send a fake tab event
+ tracker.onTab({
+ type: evttype,
+ originalTarget: evttype,
+ target: { entries: [], currentURI: "about:config" },
+ });
+ Assert.ok(tracker.modified);
+ Assert.ok(
+ Utils.deepEquals(Object.keys(await engine.getChangedIDs()), [
+ clientsEngine.localID,
+ ])
+ );
+ }
+
+ // Pretend we just synced.
+ await tracker.clearChangedIDs();
+ Assert.ok(!tracker.modified);
+
+ tracker.onTab({
+ type: "TabOpen",
+ originalTarget: "TabOpen",
+ target: { entries: [], currentURI: "about:config" },
+ });
+ Assert.ok(
+ Utils.deepEquals(Object.keys(await engine.getChangedIDs()), [
+ clientsEngine.localID,
+ ])
+ );
+
+ // Pretend we just synced and saw some progress listeners.
+ await tracker.clearChangedIDs();
+ Assert.ok(!tracker.modified);
+ tracker.onLocationChange({ isTopLevel: false }, undefined, undefined, 0);
+ Assert.ok(!tracker.modified, "non-toplevel request didn't flag as modified");
+
+ tracker.onLocationChange(
+ { isTopLevel: true },
+ undefined,
+ Services.io.newURI("https://www.mozilla.org"),
+ Ci.nsIWebProgressListener.LOCATION_CHANGE_SAME_DOCUMENT
+ );
+ Assert.ok(
+ tracker.modified,
+ "location change within the same document request did flag as modified"
+ );
+
+ tracker.onLocationChange(
+ { isTopLevel: true },
+ undefined,
+ Services.io.newURI("https://www.mozilla.org")
+ );
+ Assert.ok(
+ tracker.modified,
+ "location change for a new top-level document flagged as modified"
+ );
+ Assert.ok(
+ Utils.deepEquals(Object.keys(await engine.getChangedIDs()), [
+ clientsEngine.localID,
+ ])
+ );
+});
+
+add_task(async function run_sync_on_tab_change_test() {
+ let testPrefDelay = 20000;
+
+ // This is the pref that determines sync delay after tab change
+ Svc.PrefBranch.setIntPref(
+ "syncedTabs.syncDelayAfterTabChange",
+ testPrefDelay
+ );
+ // We should only be syncing on tab change if
+ // the user has > 1 client
+ Svc.PrefBranch.setIntPref("clients.devices.desktop", 1);
+ Svc.PrefBranch.setIntPref("clients.devices.mobile", 1);
+ scheduler.updateClientMode();
+ Assert.equal(scheduler.numClients, 2);
+
+ let engine = Service.engineManager.get("tabs");
+
+ _("We assume that tabs have changed at startup.");
+ let tracker = engine._tracker;
+ tracker.getTabState = fakeGetTabState;
+
+ Assert.ok(tracker.modified);
+ Assert.ok(
+ Utils.deepEquals(Object.keys(await engine.getChangedIDs()), [
+ clientsEngine.localID,
+ ])
+ );
+
+ _("Test sync is scheduled after a tab change");
+ for (let evttype of ["TabOpen", "TabClose"]) {
+ // Pretend we just synced
+ await tracker.clearChangedIDs();
+ clearQuickWriteTimer(tracker);
+
+ // Send a fake tab event
+ tracker.onTab({
+ type: evttype,
+ originalTarget: evttype,
+ target: { entries: [], currentURI: "about:config" },
+ });
+ // Ensure the tracker fired
+ Assert.ok(tracker.modified);
+ // The scheduled delay should be at least the pref value
+ let nextSchedule = tracker.tabsQuickWriteTimer.delay;
+ Assert.ok(nextSchedule >= testPrefDelay);
+ }
+
+ _("Test sync is NOT scheduled after an unsupported tab open");
+ for (let evttype of ["TabOpen"]) {
+ // Send a fake tab event
+ tracker.onTab({
+ type: evttype,
+ originalTarget: evttype,
+ target: { entries: ["about:newtab"], currentURI: null },
+ });
+ // Ensure the tracker fired
+ Assert.ok(tracker.modified);
+ // The next scheduled sync should be no further out than the pref value
+ Assert.ok(scheduler.nextSync - Date.now() <= testPrefDelay);
+ }
+
+ _("Test navigating within the same tab does NOT trigger a sync");
+ // Pretend we just synced
+ await tracker.clearChangedIDs();
+ clearQuickWriteTimer(tracker);
+
+ tracker.onLocationChange(
+ { isTopLevel: true },
+ undefined,
+ Services.io.newURI("https://www.mozilla.org"),
+ Ci.nsIWebProgressListener.LOCATION_CHANGE_RELOAD
+ );
+ Assert.ok(
+ !tracker.modified,
+ "location change for reloading doesn't trigger a sync"
+ );
+ Assert.ok(!tracker.tabsQuickWriteTimer, "reload does not trigger a sync");
+
+ // Pretend we just synced
+ await tracker.clearChangedIDs();
+ clearQuickWriteTimer(tracker);
+
+ _("Test navigating to an about page does trigger sync");
+ tracker.onLocationChange(
+ { isTopLevel: true },
+ undefined,
+ Services.io.newURI("about:config")
+ );
+ Assert.ok(tracker.modified, "navigating to an about page flags the tracker as modified");
+ Assert.ok(
+ tracker.tabsQuickWriteTimer,
+ "about schema should trigger a sync happening soon"
+ );
+
+ _("Test adjusting the filterScheme pref works");
+ // Pretend we just synced
+ await tracker.clearChangedIDs();
+ clearQuickWriteTimer(tracker);
+
+ Svc.PrefBranch.setStringPref(
+ "engine.tabs.filteredSchemes",
+ // Removing the about scheme for this test
+ "resource|chrome|file|blob|moz-extension"
+ );
+ tracker.onLocationChange(
+ { isTopLevel: true },
+ undefined,
+ Services.io.newURI("about:config")
+ );
+ Assert.ok(
+ tracker.modified,
+ "about page triggers a modified after we changed the pref"
+ );
+ Assert.ok(
+ tracker.tabsQuickWriteTimer,
+ "about page should schedule a quickWrite sync soon after we changed the pref"
+ );
+
+ _("Test no sync after tab change for accounts with <= 1 clients");
+ // Pretend we just synced
+ await tracker.clearChangedIDs();
+ clearQuickWriteTimer(tracker);
+ // Setting clients to only 1 so we don't sync after a tab change
+ Svc.PrefBranch.setIntPref("clients.devices.desktop", 1);
+ Svc.PrefBranch.setIntPref("clients.devices.mobile", 0);
+ scheduler.updateClientMode();
+ Assert.equal(scheduler.numClients, 1);
+
+ tracker.onLocationChange(
+ { isTopLevel: true },
+ undefined,
+ Services.io.newURI("https://www.mozilla.org")
+ );
+ Assert.ok(
+ tracker.modified,
+ "location change for a new top-level document flagged as modified"
+ );
+ Assert.ok(
+ !tracker.tabsQuickWriteTimer,
+ "We should NOT be syncing shortly because there is only one client"
+ );
+
+ _("Changing the pref adjusts the sync schedule");
+ Svc.PrefBranch.setIntPref("syncedTabs.syncDelayAfterTabChange", 10000); // 10seconds
+ let delayPref = Svc.PrefBranch.getIntPref(
+ "syncedTabs.syncDelayAfterTabChange"
+ );
+ let evttype = "TabOpen";
+ Assert.equal(delayPref, 10000); // ensure our pref is at 10s
+ // Only have task continuity if we have more than 1 device
+ Svc.PrefBranch.setIntPref("clients.devices.desktop", 1);
+ Svc.PrefBranch.setIntPref("clients.devices.mobile", 1);
+ scheduler.updateClientMode();
+ Assert.equal(scheduler.numClients, 2);
+ clearQuickWriteTimer(tracker);
+
+ // Fire ontab event
+ tracker.onTab({
+ type: evttype,
+ originalTarget: evttype,
+ target: { entries: [], currentURI: "about:config" },
+ });
+
+ // Ensure the tracker fired
+ Assert.ok(tracker.modified);
+ // We should be scheduling <= preference value
+ Assert.equal(tracker.tabsQuickWriteTimer.delay, delayPref);
+
+ _("We should not have a sync scheduled if pref is at 0");
+
+ Svc.PrefBranch.setIntPref("syncedTabs.syncDelayAfterTabChange", 0);
+ // Pretend we just synced
+ await tracker.clearChangedIDs();
+ clearQuickWriteTimer(tracker);
+
+ // Fire ontab event
+ evttype = "TabOpen";
+ tracker.onTab({
+ type: evttype,
+ originalTarget: evttype,
+ target: { entries: [], currentURI: "about:config" },
+ });
+ // Ensure the tracker fired
+ Assert.ok(tracker.modified);
+
+ // We should NOT be scheduled for a sync soon
+ Assert.ok(!tracker.tabsQuickWriteTimer);
+
+ scheduler.setDefaults();
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+});
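+
+// A condensed sketch (assumed names, not the tracker's real implementation) of
+// the scheduling rule the assertions above exercise: a quick-write timer is
+// armed only when the account has more than one client and the delay pref is
+// positive.
+function shouldScheduleQuickWrite(numClients, delayAfterTabChangeMs) {
+ return numClients > 1 && delayAfterTabChangeMs > 0;
+}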
diff --git a/services/sync/tests/unit/test_telemetry.js b/services/sync/tests/unit/test_telemetry.js
new file mode 100644
index 0000000000..961e96a01b
--- /dev/null
+++ b/services/sync/tests/unit/test_telemetry.js
@@ -0,0 +1,1462 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+const { WBORecord } = ChromeUtils.importESModule(
+ "resource://services-sync/record.sys.mjs"
+);
+const { Resource } = ChromeUtils.importESModule(
+ "resource://services-sync/resource.sys.mjs"
+);
+const { RotaryEngine } = ChromeUtils.importESModule(
+ "resource://testing-common/services/sync/rotaryengine.sys.mjs"
+);
+const { getFxAccountsSingleton } = ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+);
+const fxAccounts = getFxAccountsSingleton();
+
+function SteamStore(engine) {
+ Store.call(this, "Steam", engine);
+}
+Object.setPrototypeOf(SteamStore.prototype, Store.prototype);
+
+function SteamTracker(name, engine) {
+ LegacyTracker.call(this, name || "Steam", engine);
+}
+Object.setPrototypeOf(SteamTracker.prototype, LegacyTracker.prototype);
+
+function SteamEngine(service) {
+ SyncEngine.call(this, "steam", service);
+}
+
+SteamEngine.prototype = {
+ _storeObj: SteamStore,
+ _trackerObj: SteamTracker,
+ _errToThrow: null,
+ problemsToReport: null,
+ async _sync() {
+ if (this._errToThrow) {
+ throw this._errToThrow;
+ }
+ },
+ getValidator() {
+ return new SteamValidator();
+ },
+};
+Object.setPrototypeOf(SteamEngine.prototype, SyncEngine.prototype);
+
+function BogusEngine(service) {
+ SyncEngine.call(this, "bogus", service);
+}
+
+BogusEngine.prototype = Object.create(SteamEngine.prototype);
+
+class SteamValidator {
+ async canValidate() {
+ return true;
+ }
+
+ async validate(engine) {
+ return {
+ problems: new SteamValidationProblemData(engine.problemsToReport),
+ version: 1,
+ duration: 0,
+ recordCount: 0,
+ };
+ }
+}
+
+class SteamValidationProblemData {
+ constructor(problemsToReport = []) {
+ this.problemsToReport = problemsToReport;
+ }
+
+ getSummary() {
+ return this.problemsToReport;
+ }
+}
+
+async function cleanAndGo(engine, server) {
+ await engine._tracker.clearChangedIDs();
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ syncTestLogging();
+ Service.recordManager.clearCache();
+ await promiseStopServer(server);
+}
+
+add_task(async function setup() {
+ // Avoid addon manager complaining about not being initialized
+ await Service.engineManager.unregister("addons");
+ await Service.engineManager.unregister("extension-storage");
+});
+
+add_task(async function test_basic() {
+ enableValidationPrefs();
+
+ let helper = track_collections_helper();
+ let upd = helper.with_updated_collection;
+
+ let handlers = {
+ "/1.1/johndoe/info/collections": helper.handler,
+ "/1.1/johndoe/storage/crypto/keys": upd(
+ "crypto",
+ new ServerWBO("keys").handler()
+ ),
+ "/1.1/johndoe/storage/meta/global": upd(
+ "meta",
+ new ServerWBO("global").handler()
+ ),
+ };
+
+ let collections = [
+ "clients",
+ "bookmarks",
+ "forms",
+ "history",
+ "passwords",
+ "prefs",
+ "tabs",
+ ];
+
+ for (let coll of collections) {
+ handlers["/1.1/johndoe/storage/" + coll] = upd(
+ coll,
+ new ServerCollection({}, true).handler()
+ );
+ }
+
+ let server = httpd_setup(handlers);
+ await configureIdentity({ username: "johndoe" }, server);
+
+ let ping = await wait_for_ping(() => Service.sync(), true, true);
+
+ // Check the "os" block - we can't really check specific values, but can
+ // check it smells sane.
+ ok(ping.os, "there is an OS block");
+ ok("name" in ping.os, "there is an OS name");
+ ok("version" in ping.os, "there is an OS version");
+ ok("locale" in ping.os, "there is an OS locale");
+
+ for (const pref of Svc.PrefBranch.getChildList("")) {
+ Svc.PrefBranch.clearUserPref(pref);
+ }
+ await promiseStopServer(server);
+});
+
+add_task(async function test_processIncoming_error() {
+ let engine = Service.engineManager.get("bookmarks");
+ await engine.initialize();
+ let store = engine._store;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ let collection = server.user("foo").collection("bookmarks");
+ try {
+ // Create a bogus record that, when synced down, will provoke a network
+ // error, which in turn provokes an exception in _processIncoming.
+ const BOGUS_GUID = "zzzzzzzzzzzz";
+ let bogus_record = collection.insert(BOGUS_GUID, "I'm a bogus record!");
+ bogus_record.get = function get() {
+ throw new Error("Sync this!");
+ };
+ // Make the record 10 minutes old so it will only be synced in the toFetch phase.
+ bogus_record.modified = Date.now() / 1000 - 60 * 10;
+ await engine.setLastSync(Date.now() / 1000 - 60);
+ engine.toFetch = new SerializableSet([BOGUS_GUID]);
+
+ let error, pingPayload, fullPing;
+ try {
+ await sync_engine_and_validate_telem(
+ engine,
+ true,
+ (errPing, fullErrPing) => {
+ pingPayload = errPing;
+ fullPing = fullErrPing;
+ }
+ );
+ } catch (ex) {
+ error = ex;
+ }
+ ok(!!error);
+ ok(!!pingPayload);
+
+ equal(fullPing.uid, "f".repeat(32)); // as setup by SyncTestingInfrastructure
+ deepEqual(pingPayload.failureReason, {
+ name: "httperror",
+ code: 500,
+ });
+
+ equal(pingPayload.engines.length, 1);
+
+ equal(pingPayload.engines[0].name, "bookmarks-buffered");
+ deepEqual(pingPayload.engines[0].failureReason, {
+ name: "httperror",
+ code: 500,
+ });
+ } finally {
+ await store.wipe();
+ await cleanAndGo(engine, server);
+ }
+});
+
+add_task(async function test_uploading() {
+ let engine = Service.engineManager.get("bookmarks");
+ await engine.initialize();
+ let store = engine._store;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ let bmk = await PlacesUtils.bookmarks.insert({
+ parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+ url: "http://getfirefox.com/",
+ title: "Get Firefox!",
+ });
+
+ try {
+ let ping = await sync_engine_and_validate_telem(engine, false);
+ ok(!!ping);
+ equal(ping.engines.length, 1);
+ equal(ping.engines[0].name, "bookmarks-buffered");
+ ok(!!ping.engines[0].outgoing);
+ greater(ping.engines[0].outgoing[0].sent, 0);
+ ok(!ping.engines[0].incoming);
+
+ await PlacesUtils.bookmarks.update({
+ guid: bmk.guid,
+ title: "New Title",
+ });
+
+ await store.wipe();
+ await engine.resetClient();
+ // We don't sync via the service, so we don't re-hit info/collections;
+ // lastModified remaining at zero would break things subtly...
+ engine.lastModified = null;
+
+ ping = await sync_engine_and_validate_telem(engine, false);
+ equal(ping.engines.length, 1);
+ equal(ping.engines[0].name, "bookmarks-buffered");
+ equal(ping.engines[0].outgoing.length, 1);
+ ok(!!ping.engines[0].incoming);
+ } finally {
+ // Clean up.
+ await store.wipe();
+ await cleanAndGo(engine, server);
+ }
+});
+
+add_task(async function test_upload_failed() {
+ let collection = new ServerCollection();
+ collection._wbos.flying = new ServerWBO("flying");
+
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+
+ await SyncTestingInfrastructure(server);
+ await configureIdentity({ username: "foo" }, server);
+
+ let engine = new RotaryEngine(Service);
+ engine._store.items = {
+ flying: "LNER Class A3 4472",
+ scotsman: "Flying Scotsman",
+ peppercorn: "Peppercorn Class",
+ };
+ const FLYING_CHANGED = 12345;
+ const SCOTSMAN_CHANGED = 23456;
+ const PEPPERCORN_CHANGED = 34567;
+ await engine._tracker.addChangedID("flying", FLYING_CHANGED);
+ await engine._tracker.addChangedID("scotsman", SCOTSMAN_CHANGED);
+ await engine._tracker.addChangedID("peppercorn", PEPPERCORN_CHANGED);
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+
+ try {
+ await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
+ let changes = await engine._tracker.getChangedIDs();
+ _(
+ `test_upload_failed: Rotary tracker contents at first sync: ${JSON.stringify(
+ changes
+ )}`
+ );
+ engine.enabled = true;
+ let ping = await sync_engine_and_validate_telem(engine, true);
+ ok(!!ping);
+ equal(ping.engines.length, 1);
+ equal(ping.engines[0].incoming, null);
+ deepEqual(ping.engines[0].outgoing, [
+ {
+ sent: 3,
+ failed: 2,
+ failedReasons: [
+ { name: "scotsman", count: 1 },
+ { name: "peppercorn", count: 1 },
+ ],
+ },
+ ]);
+ await engine.setLastSync(123);
+
+ changes = await engine._tracker.getChangedIDs();
+ _(
+ `test_upload_failed: Rotary tracker contents at second sync: ${JSON.stringify(
+ changes
+ )}`
+ );
+ ping = await sync_engine_and_validate_telem(engine, true);
+ ok(!!ping);
+ equal(ping.engines.length, 1);
+ deepEqual(ping.engines[0].outgoing, [
+ {
+ sent: 2,
+ failed: 2,
+ failedReasons: [
+ { name: "scotsman", count: 1 },
+ { name: "peppercorn", count: 1 },
+ ],
+ },
+ ]);
+ } finally {
+ await cleanAndGo(engine, server);
+ await engine.finalize();
+ }
+});
+
+add_task(async function test_sync_partialUpload() {
+ let collection = new ServerCollection();
+ let server = sync_httpd_setup({
+ "/1.1/foo/storage/rotary": collection.handler(),
+ });
+ await SyncTestingInfrastructure(server);
+ await generateNewKeys(Service.collectionKeys);
+
+ let engine = new RotaryEngine(Service);
+ await engine.setLastSync(123);
+
+ // Create a bunch of records (and server side handlers)
+ for (let i = 0; i < 234; i++) {
+ let id = "record-no-" + i;
+ engine._store.items[id] = "Record No. " + i;
+ await engine._tracker.addChangedID(id, i);
+ // Let two items in the first upload batch fail.
+ if (i != 23 && i != 42) {
+ collection.insert(id);
+ }
+ }
+
+ let syncID = await engine.resetLocalSyncID();
+ let meta_global = Service.recordManager.set(
+ engine.metaURL,
+ new WBORecord(engine.metaURL)
+ );
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
+
+ try {
+ let changes = await engine._tracker.getChangedIDs();
+ _(
+ `test_sync_partialUpload: Rotary tracker contents at first sync: ${JSON.stringify(
+ changes
+ )}`
+ );
+ engine.enabled = true;
+ let ping = await sync_engine_and_validate_telem(engine, true);
+
+ ok(!!ping);
+ ok(!ping.failureReason);
+ equal(ping.engines.length, 1);
+ equal(ping.engines[0].name, "rotary");
+ ok(!ping.engines[0].incoming);
+ ok(!ping.engines[0].failureReason);
+ deepEqual(ping.engines[0].outgoing, [
+ {
+ sent: 234,
+ failed: 2,
+ failedReasons: [
+ { name: "record-no-23", count: 1 },
+ { name: "record-no-42", count: 1 },
+ ],
+ },
+ ]);
+ collection.post = function () {
+ throw new Error("Failure");
+ };
+
+ engine._store.items["record-no-1000"] = "Record No. 1000";
+ await engine._tracker.addChangedID("record-no-1000", 1000);
+ collection.insert("record-no-1000", 1000);
+
+ await engine.setLastSync(123);
+ ping = null;
+
+ changes = await engine._tracker.getChangedIDs();
+ _(
+ `test_sync_partialUpload: Rotary tracker contents at second sync: ${JSON.stringify(
+ changes
+ )}`
+ );
+ try {
+ // should throw
+ await sync_engine_and_validate_telem(
+ engine,
+ true,
+ errPing => (ping = errPing)
+ );
+ } catch (e) {}
+ // It would be nice if we had a more descriptive error for this...
+ let uploadFailureError = {
+ name: "httperror",
+ code: 500,
+ };
+
+ ok(!!ping);
+ deepEqual(ping.failureReason, uploadFailureError);
+ equal(ping.engines.length, 1);
+ equal(ping.engines[0].name, "rotary");
+ deepEqual(ping.engines[0].incoming, {
+ failed: 1,
+ failedReasons: [{ name: "No ciphertext: nothing to decrypt?", count: 1 }],
+ });
+ ok(!ping.engines[0].outgoing);
+ deepEqual(ping.engines[0].failureReason, uploadFailureError);
+ } finally {
+ await cleanAndGo(engine, server);
+ await engine.finalize();
+ }
+});
+
+add_task(async function test_generic_engine_fail() {
+ enableValidationPrefs();
+
+ await Service.engineManager.register(SteamEngine);
+ let engine = Service.engineManager.get("steam");
+ engine.enabled = true;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ let e = new Error("generic failure message");
+ engine._errToThrow = e;
+
+ try {
+ const changes = await engine._tracker.getChangedIDs();
+ _(
+ `test_generic_engine_fail: Steam tracker contents: ${JSON.stringify(
+ changes
+ )}`
+ );
+ await sync_and_validate_telem(ping => {
+ equal(ping.status.service, SYNC_FAILED_PARTIAL);
+ deepEqual(ping.engines.find(err => err.name === "steam").failureReason, {
+ name: "unexpectederror",
+ error: String(e),
+ });
+ });
+ } finally {
+ await cleanAndGo(engine, server);
+ await Service.engineManager.unregister(engine);
+ }
+});
+
+add_task(async function test_engine_fail_weird_errors() {
+ enableValidationPrefs();
+ await Service.engineManager.register(SteamEngine);
+ let engine = Service.engineManager.get("steam");
+ engine.enabled = true;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ try {
+ let msg = "Bad things happened!";
+ engine._errToThrow = { message: msg };
+ await sync_and_validate_telem(ping => {
+ equal(ping.status.service, SYNC_FAILED_PARTIAL);
+ deepEqual(ping.engines.find(err => err.name === "steam").failureReason, {
+ name: "unexpectederror",
+ error: "Bad things happened!",
+ });
+ });
+ let e = { msg };
+ engine._errToThrow = e;
+ await sync_and_validate_telem(ping => {
+ deepEqual(ping.engines.find(err => err.name === "steam").failureReason, {
+ name: "unexpectederror",
+ error: JSON.stringify(e),
+ });
+ });
+ } finally {
+ await cleanAndGo(engine, server);
+ Service.engineManager.unregister(engine);
+ }
+});
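+
+// A sketch of the reporting rule these two cases pin down (inferred only from
+// the assertions here, not the telemetry module's actual code): message-bearing
+// objects report their message, anything else is JSON-serialized. (Error
+// instances, per test_generic_engine_fail above, report String(error) instead.)
+function describeWeirdError(error) {
+ if (error && error.message) {
+ return error.message;
+ }
+ return JSON.stringify(error);
+}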
+
+add_task(async function test_overrideTelemetryName() {
+ enableValidationPrefs(["steam"]);
+
+ await Service.engineManager.register(SteamEngine);
+ let engine = Service.engineManager.get("steam");
+ engine.overrideTelemetryName = "steam-but-better";
+ engine.enabled = true;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+
+ const problemsToReport = [
+ { name: "someProblem", count: 123 },
+ { name: "anotherProblem", count: 456 },
+ ];
+
+ try {
+ info("Sync with validation problems");
+ engine.problemsToReport = problemsToReport;
+ await sync_and_validate_telem(ping => {
+ let enginePing = ping.engines.find(e => e.name === "steam-but-better");
+ ok(enginePing);
+ ok(!ping.engines.find(e => e.name === "steam"));
+ deepEqual(
+ enginePing.validation,
+ {
+ version: 1,
+ checked: 0,
+ problems: problemsToReport,
+ },
+ "Should include validation report with overridden name"
+ );
+ });
+
+ info("Sync without validation problems");
+ engine.problemsToReport = null;
+ await sync_and_validate_telem(ping => {
+ let enginePing = ping.engines.find(e => e.name === "steam-but-better");
+ ok(enginePing);
+ ok(!ping.engines.find(e => e.name === "steam"));
+ ok(
+ !enginePing.validation,
+ "Should not include validation report when there are no problems"
+ );
+ });
+ } finally {
+ await cleanAndGo(engine, server);
+ await Service.engineManager.unregister(engine);
+ }
+});
+
+add_task(async function test_engine_fail_ioerror() {
+ enableValidationPrefs();
+
+ await Service.engineManager.register(SteamEngine);
+ let engine = Service.engineManager.get("steam");
+ engine.enabled = true;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ // create an IOError to re-throw as part of Sync.
+ try {
+ // (Note that fakeservices.js has replaced Utils.jsonMove etc, but for
+ // this test we need the real one so we get real exceptions from the
+ // filesystem.)
+ await Utils._real_jsonMove("file-does-not-exist", "anything", {});
+ } catch (ex) {
+ engine._errToThrow = ex;
+ }
+ ok(engine._errToThrow, "expecting exception");
+
+ try {
+ const changes = await engine._tracker.getChangedIDs();
+ _(
+ `test_engine_fail_ioerror: Steam tracker contents: ${JSON.stringify(
+ changes
+ )}`
+ );
+ await sync_and_validate_telem(ping => {
+ equal(ping.status.service, SYNC_FAILED_PARTIAL);
+ let failureReason = ping.engines.find(
+ e => e.name === "steam"
+ ).failureReason;
+ equal(failureReason.name, "unexpectederror");
+ // ensure the profile dir in the exception message has been stripped.
+ ok(
+ !failureReason.error.includes(PathUtils.profileDir),
+ failureReason.error
+ );
+ ok(failureReason.error.includes("[profileDir]"), failureReason.error);
+ });
+ } finally {
+ await cleanAndGo(engine, server);
+ await Service.engineManager.unregister(engine);
+ }
+});
+
+add_task(async function test_error_detections() {
+ let telem = get_sync_test_telemetry();
+
+ // Non-network NS_ERROR_ codes get their own category.
+ Assert.deepEqual(
+ telem.transformError(Components.Exception("", Cr.NS_ERROR_FAILURE)),
+ { name: "nserror", code: Cr.NS_ERROR_FAILURE }
+ );
+
+ // Some NS_ERROR_ codes in the "network" module are treated as HTTP errors.
+ Assert.deepEqual(
+ telem.transformError(Components.Exception("", Cr.NS_ERROR_UNKNOWN_HOST)),
+ { name: "httperror", code: Cr.NS_ERROR_UNKNOWN_HOST }
+ );
+ // NS_ERROR_ABORT is also treated as a network error by our telemetry.
+ Assert.deepEqual(
+ telem.transformError(Components.Exception("", Cr.NS_ERROR_ABORT)),
+ { name: "httperror", code: Cr.NS_ERROR_ABORT }
+ );
+});
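+
+// A condensed sketch (assumed structure) of the classification pinned down by
+// the three assertions above: NS_ERROR_ codes from the network module, plus
+// NS_ERROR_ABORT, map to "httperror"; other NS_ERROR_ codes map to "nserror".
+function classifyNsError(code, isNetworkModule) {
+ if (isNetworkModule || code == Cr.NS_ERROR_ABORT) {
+ return { name: "httperror", code };
+ }
+ return { name: "nserror", code };
+}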
+
+add_task(async function test_clean_urls() {
+ enableValidationPrefs();
+
+ await Service.engineManager.register(SteamEngine);
+ let engine = Service.engineManager.get("steam");
+ engine.enabled = true;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ engine._errToThrow = new TypeError(
+ "http://www.google .com is not a valid URL."
+ );
+
+ try {
+ const changes = await engine._tracker.getChangedIDs();
+ _(`test_clean_urls: Steam tracker contents: ${JSON.stringify(changes)}`);
+ await sync_and_validate_telem(ping => {
+ equal(ping.status.service, SYNC_FAILED_PARTIAL);
+ let failureReason = ping.engines.find(
+ e => e.name === "steam"
+ ).failureReason;
+ equal(failureReason.name, "unexpectederror");
+ equal(failureReason.error, "<URL> is not a valid URL.");
+ });
+ // Handle other errors that include urls.
+ engine._errToThrow =
+ "Other error message that includes some:url/foo/bar/ in it.";
+ await sync_and_validate_telem(ping => {
+ equal(ping.status.service, SYNC_FAILED_PARTIAL);
+ let failureReason = ping.engines.find(
+ e => e.name === "steam"
+ ).failureReason;
+ equal(failureReason.name, "unexpectederror");
+ equal(
+ failureReason.error,
+ "Other error message that includes <URL> in it."
+ );
+ });
+ } finally {
+ await cleanAndGo(engine, server);
+ await Service.engineManager.unregister(engine);
+ }
+});
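+
+// A rough sketch of the URL scrubbing exercised above (an approximation, not
+// the sanitizer's actual pattern): scheme:rest tokens become <URL>. Note the
+// first case shows the real implementation even copes with a stray space
+// inside the URL ("www.google .com"), which this naive version would not.
+function scrubUrlsNaive(message) {
+ return message.replace(/[a-z][a-z0-9+.-]*:\S+/gi, "<URL>");
+}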
+
+// Test sanitizing guid-related errors with the pattern of <guid: {guid}>
+add_task(async function test_sanitize_bookmarks_guid() {
+ let { ErrorSanitizer } = ChromeUtils.importESModule(
+ "resource://services-sync/telemetry.sys.mjs"
+ );
+
+ for (let [original, expected] of [
+ [
+ "Can't insert Bookmark <guid: sknD84IdnSY2> into Folder <guid: odfninDdi93_3>",
+ "Can't insert Bookmark <GUID> into Folder <GUID>",
+ ],
+ [
+ "Merge Error: Item <guid: H6fmPA16gZs9> can't contain itself",
+ "Merge Error: Item <GUID> can't contain itself",
+ ],
+ ]) {
+ const sanitized = ErrorSanitizer.cleanErrorMessage(original);
+ Assert.equal(sanitized, expected);
+ }
+});
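+
+// A minimal sketch of the <guid: ...> scrubbing these cases exercise (assumed
+// pattern, matching the base64url alphabet Places GUIDs use):
+function scrubGuids(message) {
+ return message.replace(/<guid: [0-9A-Za-z_-]+>/g, "<GUID>");
+}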
+
+// Test sanitization of some hard-coded error strings.
+add_task(async function test_clean_errors() {
+ let { ErrorSanitizer } = ChromeUtils.importESModule(
+ "resource://services-sync/telemetry.sys.mjs"
+ );
+
+ for (let [message, name, expected] of [
+ [
+ `Could not open the file at ${PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ "addonsreconciler.json"
+ )} for writing`,
+ "NotFoundError",
+ "OS error [File/Path not found] Could not open the file at [profileDir]/weave/addonsreconciler.json for writing",
+ ],
+ [
+ `Could not get info for the file at ${PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ "addonsreconciler.json"
+ )}`,
+ "NotAllowedError",
+ "OS error [Permission denied] Could not get info for the file at [profileDir]/weave/addonsreconciler.json",
+ ],
+ ]) {
+ const error = new DOMException(message, name);
+ const sanitized = ErrorSanitizer.cleanErrorMessage(message, error);
+ Assert.equal(sanitized, expected);
+ }
+});
+
+// Arrange for a sync to hit a "real" OS error during a sync and make sure it's sanitized.
+add_task(async function test_clean_real_os_error() {
+ enableValidationPrefs();
+
+ // Simulate a real error.
+ await Service.engineManager.register(SteamEngine);
+ let engine = Service.engineManager.get("steam");
+ engine.enabled = true;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ let path = PathUtils.join(PathUtils.profileDir, "no", "such", "path.json");
+ try {
+ await IOUtils.readJSON(path);
+ throw new Error("should fail to read the file");
+ } catch (ex) {
+ engine._errToThrow = ex;
+ }
+
+ try {
+ const changes = await engine._tracker.getChangedIDs();
+ _(`test_clean_real_os_error: Steam tracker contents: ${JSON.stringify(changes)}`);
+ await sync_and_validate_telem(ping => {
+ equal(ping.status.service, SYNC_FAILED_PARTIAL);
+ let failureReason = ping.engines.find(
+ e => e.name === "steam"
+ ).failureReason;
+ equal(failureReason.name, "unexpectederror");
+ equal(
+ failureReason.error,
+ "OS error [File/Path not found] Could not open the file at [profileDir]/no/such/path.json"
+ );
+ });
+ } finally {
+ await cleanAndGo(engine, server);
+ await Service.engineManager.unregister(engine);
+ }
+});
+
+add_task(async function test_initial_sync_engines() {
+ enableValidationPrefs();
+
+ await Service.engineManager.register(SteamEngine);
+ let engine = Service.engineManager.get("steam");
+ engine.enabled = true;
+ // These are the only engines that actually have things to sync at startup.
+ let telemetryEngineNames = ["clients", "prefs", "tabs", "bookmarks-buffered"];
+ let server = await serverForEnginesWithKeys(
+ { foo: "password" },
+ ["bookmarks", "prefs", "tabs"].map(name => Service.engineManager.get(name))
+ );
+ await SyncTestingInfrastructure(server);
+ try {
+ const changes = await engine._tracker.getChangedIDs();
+ _(
+ `test_initial_sync_engines: Steam tracker contents: ${JSON.stringify(
+ changes
+ )}`
+ );
+ let ping = await wait_for_ping(() => Service.sync(), true);
+
+ equal(ping.engines.find(e => e.name === "clients").outgoing[0].sent, 1);
+ equal(ping.engines.find(e => e.name === "tabs").outgoing[0].sent, 1);
+
+ // for the rest we don't care about specifics
+ for (let e of ping.engines) {
+ if (!telemetryEngineNames.includes(e.name)) {
+ continue;
+ }
+ greaterOrEqual(e.took, 1);
+ ok(!!e.outgoing);
+ equal(e.outgoing.length, 1);
+ notEqual(e.outgoing[0].sent, undefined);
+ equal(e.outgoing[0].failed, undefined);
+ equal(e.outgoing[0].failedReasons, undefined);
+ }
+ } finally {
+ await cleanAndGo(engine, server);
+ await Service.engineManager.unregister(engine);
+ }
+});
+
+add_task(async function test_nserror() {
+ enableValidationPrefs();
+
+ await Service.engineManager.register(SteamEngine);
+ let engine = Service.engineManager.get("steam");
+ engine.enabled = true;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ engine._errToThrow = Components.Exception(
+ "NS_ERROR_UNKNOWN_HOST",
+ Cr.NS_ERROR_UNKNOWN_HOST
+ );
+ try {
+ const changes = await engine._tracker.getChangedIDs();
+ _(`test_nserror: Steam tracker contents: ${JSON.stringify(changes)}`);
+ await sync_and_validate_telem(ping => {
+ deepEqual(ping.status, {
+ service: SYNC_FAILED_PARTIAL,
+ sync: LOGIN_FAILED_NETWORK_ERROR,
+ });
+ let enginePing = ping.engines.find(e => e.name === "steam");
+ deepEqual(enginePing.failureReason, {
+ name: "httperror",
+ code: Cr.NS_ERROR_UNKNOWN_HOST,
+ });
+ });
+ } finally {
+ await cleanAndGo(engine, server);
+ await Service.engineManager.unregister(engine);
+ }
+});
+
+add_task(async function test_sync_why() {
+ enableValidationPrefs();
+
+ await Service.engineManager.register(SteamEngine);
+ let engine = Service.engineManager.get("steam");
+ engine.enabled = true;
+ let server = await serverForFoo(engine);
+ await SyncTestingInfrastructure(server);
+ let e = new Error("generic failure message");
+ engine._errToThrow = e;
+
+ try {
+ const changes = await engine._tracker.getChangedIDs();
+ _(
+ `test_sync_why: Steam tracker contents: ${JSON.stringify(
+ changes
+ )}`
+ );
+ let ping = await wait_for_ping(
+ () => Service.sync({ why: "user" }),
+ true,
+ false
+ );
+ _(JSON.stringify(ping));
+ equal(ping.why, "user");
+ } finally {
+ await cleanAndGo(engine, server);
+ await Service.engineManager.unregister(engine);
+ }
+});
+
+add_task(async function test_discarding() {
+ enableValidationPrefs();
+
+ let helper = track_collections_helper();
+ let upd = helper.with_updated_collection;
+ let telem = get_sync_test_telemetry();
+ telem.maxPayloadCount = 2;
+ telem.submissionInterval = Infinity;
+ let oldSubmit = telem.submit;
+
+ let server;
+ try {
+ let handlers = {
+ "/1.1/johndoe/info/collections": helper.handler,
+ "/1.1/johndoe/storage/crypto/keys": upd(
+ "crypto",
+ new ServerWBO("keys").handler()
+ ),
+ "/1.1/johndoe/storage/meta/global": upd(
+ "meta",
+ new ServerWBO("global").handler()
+ ),
+ };
+
+ let collections = [
+ "clients",
+ "bookmarks",
+ "forms",
+ "history",
+ "passwords",
+ "prefs",
+ "tabs",
+ ];
+
+ for (let coll of collections) {
+ handlers["/1.1/johndoe/storage/" + coll] = upd(
+ coll,
+ new ServerCollection({}, true).handler()
+ );
+ }
+
+ server = httpd_setup(handlers);
+ await configureIdentity({ username: "johndoe" }, server);
+ telem.submit = p =>
+ ok(
+ false,
+ "Submitted telemetry ping when we should not have" + JSON.stringify(p)
+ );
+
+ for (let i = 0; i < 5; ++i) {
+ await Service.sync();
+ }
+ telem.submit = oldSubmit;
+ telem.submissionInterval = -1;
+ let ping = await wait_for_ping(() => Service.sync(), true, true); // with this we've synced 6 times
+ equal(ping.syncs.length, 2);
+ equal(ping.discarded, 4);
+ } finally {
+ telem.maxPayloadCount = 500;
+ telem.submissionInterval = -1;
+ telem.submit = oldSubmit;
+ if (server) {
+ await promiseStopServer(server);
+ }
+ }
+});
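+
+// The arithmetic behind the two assertions above: with maxPayloadCount = 2 and
+// six syncs total, the submitted ping keeps the 2 most recent syncs and
+// reports discarded = 4.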
+
+add_task(async function test_submit_interval() {
+ let telem = get_sync_test_telemetry();
+ let oldSubmit = telem.submit;
+ let numSubmissions = 0;
+ telem.submit = function () {
+ numSubmissions += 1;
+ };
+
+ function notify(what, data = null) {
+ Svc.Obs.notify(what, JSON.stringify(data));
+ }
+
+ try {
+ // submissionInterval is set such that each sync should submit
+ notify("weave:service:sync:start", { why: "testing" });
+ notify("weave:service:sync:finish");
+ Assert.equal(numSubmissions, 1, "should submit this ping due to interval");
+
+ // As should each event outside of a sync.
+ Service.recordTelemetryEvent("object", "method");
+ Assert.equal(numSubmissions, 2);
+
+ // But events while we are syncing should not.
+ notify("weave:service:sync:start", { why: "testing" });
+ Service.recordTelemetryEvent("object", "method");
+ Assert.equal(numSubmissions, 2, "no submission for this event");
+ notify("weave:service:sync:finish");
+ Assert.equal(numSubmissions, 3, "was submitted after sync finish");
+ } finally {
+ telem.submit = oldSubmit;
+ }
+});
+
+add_task(async function test_no_foreign_engines_in_error_ping() {
+ enableValidationPrefs();
+
+ await Service.engineManager.register(BogusEngine);
+ let engine = Service.engineManager.get("bogus");
+ engine.enabled = true;
+ let server = await serverForFoo(engine);
+ engine._errToThrow = new Error("Oh no!");
+ await SyncTestingInfrastructure(server);
+ try {
+ await sync_and_validate_telem(ping => {
+ equal(ping.status.service, SYNC_FAILED_PARTIAL);
+ ok(ping.engines.every(e => e.name !== "bogus"));
+ });
+ } finally {
+ await cleanAndGo(engine, server);
+ await Service.engineManager.unregister(engine);
+ }
+});
+
+add_task(async function test_no_foreign_engines_in_success_ping() {
+ enableValidationPrefs();
+
+ await Service.engineManager.register(BogusEngine);
+ let engine = Service.engineManager.get("bogus");
+ engine.enabled = true;
+ let server = await serverForFoo(engine);
+
+ await SyncTestingInfrastructure(server);
+ try {
+ await sync_and_validate_telem(ping => {
+ ok(ping.engines.every(e => e.name !== "bogus"));
+ });
+ } finally {
+ await cleanAndGo(engine, server);
+ await Service.engineManager.unregister(engine);
+ }
+});
+
+add_task(async function test_events() {
+ enableValidationPrefs();
+
+ await Service.engineManager.register(BogusEngine);
+ let engine = Service.engineManager.get("bogus");
+ engine.enabled = true;
+ let server = await serverForFoo(engine);
+
+ await SyncTestingInfrastructure(server);
+
+ let telem = get_sync_test_telemetry();
+ telem.submissionInterval = Infinity;
+
+ try {
+ let serverTime = Resource.serverTime;
+ Service.recordTelemetryEvent("object", "method", "value", { foo: "bar" });
+ let ping = await wait_for_ping(() => Service.sync(), true, true);
+ equal(ping.events.length, 1);
+ let [timestamp, category, method, object, value, extra] = ping.events[0];
+ ok(typeof timestamp == "number" && timestamp > 0); // timestamp.
+ equal(category, "sync");
+ equal(method, "method");
+ equal(object, "object");
+ equal(value, "value");
+ deepEqual(extra, { foo: "bar", serverTime: String(serverTime) });
+ ping = await wait_for_ping(
+ () => {
+ // Test with optional values.
+ Service.recordTelemetryEvent("object", "method");
+ },
+ false,
+ true
+ );
+ equal(ping.events.length, 1);
+ equal(ping.events[0].length, 4);
+
+ ping = await wait_for_ping(
+ () => {
+ Service.recordTelemetryEvent("object", "method", "extra");
+ },
+ false,
+ true
+ );
+ equal(ping.events.length, 1);
+ equal(ping.events[0].length, 5);
+
+ ping = await wait_for_ping(
+ () => {
+ Service.recordTelemetryEvent("object", "method", undefined, {
+ foo: "bar",
+ });
+ },
+ false,
+ true
+ );
+ equal(ping.events.length, 1);
+ equal(ping.events[0].length, 6);
+ [timestamp, category, method, object, value, extra] = ping.events[0];
+ equal(value, null);
+
+ // Fake a submission due to shutdown.
+ ping = await wait_for_ping(
+ () => {
+ telem.submissionInterval = Infinity;
+ Service.recordTelemetryEvent("object", "method", undefined, {
+ foo: "bar",
+ });
+ telem.finish("shutdown");
+ },
+ false,
+ true
+ );
+ equal(ping.syncs.length, 0);
+ equal(ping.events.length, 1);
+ equal(ping.events[0].length, 6);
+ } finally {
+ await cleanAndGo(engine, server);
+ await Service.engineManager.unregister(engine);
+ }
+});
+
+add_task(async function test_histograms() {
+ enableValidationPrefs();
+
+ await Service.engineManager.register(BogusEngine);
+ let engine = Service.engineManager.get("bogus");
+ engine.enabled = true;
+ let server = await serverForFoo(engine);
+
+ await SyncTestingInfrastructure(server);
+ try {
+ let histId = "TELEMETRY_TEST_LINEAR";
+ Services.obs.notifyObservers(null, "weave:telemetry:histogram", histId);
+ let ping = await wait_for_ping(() => Service.sync(), true, true);
+ equal(Object.keys(ping.histograms).length, 1);
+ equal(ping.histograms[histId].sum, 0);
+ equal(ping.histograms[histId].histogram_type, 1);
+ } finally {
+ await cleanAndGo(engine, server);
+ await Service.engineManager.unregister(engine);
+ }
+});
+
+add_task(async function test_invalid_events() {
+ enableValidationPrefs();
+
+ await Service.engineManager.register(BogusEngine);
+ let engine = Service.engineManager.get("bogus");
+ engine.enabled = true;
+ let server = await serverForFoo(engine);
+
+ async function checkNotRecorded(...args) {
+ Service.recordTelemetryEvent(...args);
+ let ping = await wait_for_ping(() => Service.sync(), false, true);
+ equal(ping.events, undefined);
+ }
+
+ await SyncTestingInfrastructure(server);
+ try {
+ let long21 = "l".repeat(21);
+ let long81 = "l".repeat(81);
+ let long86 = "l".repeat(86);
+ await checkNotRecorded("object");
+ await checkNotRecorded("object", 2);
+ await checkNotRecorded(2, "method");
+ await checkNotRecorded("object", "method", 2);
+ await checkNotRecorded("object", "method", "value", 2);
+ await checkNotRecorded("object", "method", "value", { foo: 2 });
+ await checkNotRecorded(long21, "method", "value");
+ await checkNotRecorded("object", long21, "value");
+ await checkNotRecorded("object", "method", long81);
+ let badextra = {};
+ badextra[long21] = "x";
+ await checkNotRecorded("object", "method", "value", badextra);
+ badextra = { x: long86 };
+ await checkNotRecorded("object", "method", "value", badextra);
+ for (let i = 0; i < 10; i++) {
+ badextra["name" + i] = "x";
+ }
+ await checkNotRecorded("object", "method", "value", badextra);
+ } finally {
+ await cleanAndGo(engine, server);
+ await Service.engineManager.unregister(engine);
+ }
+});
+
+add_task(async function test_no_ping_for_self_hosters() {
+ enableValidationPrefs();
+
+ let telem = get_sync_test_telemetry();
+ let oldSubmit = telem.submit;
+
+ await Service.engineManager.register(BogusEngine);
+ let engine = Service.engineManager.get("bogus");
+ engine.enabled = true;
+ let server = await serverForFoo(engine);
+
+ await SyncTestingInfrastructure(server);
+ try {
+ let submitPromise = new Promise(resolve => {
+ telem.submit = function () {
+ let result = oldSubmit.apply(this, arguments);
+ resolve(result);
+ };
+ });
+ await Service.sync();
+ let pingSubmitted = await submitPromise;
+ // The Sync testing infrastructure already sets up a custom token server,
+ // so we don't need to do anything to simulate a self-hosted user.
+ ok(!pingSubmitted, "Should not submit ping with custom token server URL");
+ } finally {
+ telem.submit = oldSubmit;
+ await cleanAndGo(engine, server);
+ await Service.engineManager.unregister(engine);
+ }
+});
+
+add_task(async function test_fxa_device_telem() {
+ let t = get_sync_test_telemetry();
+ let syncEnabled = true;
+ let oldGetClientsEngineRecords = t.getClientsEngineRecords;
+ let oldGetFxaDevices = t.getFxaDevices;
+ let oldSyncIsEnabled = t.syncIsEnabled;
+ let oldSanitizeFxaDeviceId = t.sanitizeFxaDeviceId;
+ t.syncIsEnabled = () => syncEnabled;
+ t.sanitizeFxaDeviceId = id => `So clean: ${id}`;
+ try {
+ let keep0 = Utils.makeGUID();
+ let keep1 = Utils.makeGUID();
+ let keep2 = Utils.makeGUID();
+ let curdev = Utils.makeGUID();
+
+ let keep1Sync = Utils.makeGUID();
+ let keep2Sync = Utils.makeGUID();
+ let curdevSync = Utils.makeGUID();
+ let fxaDevices = [
+ {
+ id: curdev,
+ isCurrentDevice: true,
+ lastAccessTime: Date.now() - 1000 * 60 * 60 * 24 * 1,
+ pushEndpointExpired: false,
+ type: "desktop",
+ name: "current device",
+ },
+ {
+ id: keep0,
+ isCurrentDevice: false,
+ lastAccessTime: Date.now() - 1000 * 60 * 60 * 24 * 10,
+ pushEndpointExpired: false,
+ type: "mobile",
+ name: "dupe",
+ },
+ // Valid 2
+ {
+ id: keep1,
+ isCurrentDevice: false,
+ lastAccessTime: Date.now() - 1000 * 60 * 60 * 24 * 1,
+ pushEndpointExpired: false,
+ type: "desktop",
+ name: "valid2",
+ },
+ // Valid 3
+ {
+ id: keep2,
+ isCurrentDevice: false,
+ lastAccessTime: Date.now() - 1000 * 60 * 60 * 24 * 5,
+ pushEndpointExpired: false,
+ type: "desktop",
+ name: "valid3",
+ },
+ ];
+ let clientInfo = [
+ {
+ id: keep1Sync,
+ fxaDeviceId: keep1,
+ os: "Windows 30",
+ version: "Firefox 1 million",
+ },
+ {
+ id: keep2Sync,
+ fxaDeviceId: keep2,
+ os: "firefox, but an os",
+ version: "twelve",
+ },
+ {
+ id: Utils.makeGUID(),
+ fxaDeviceId: null,
+ os: "apparently ios used to keep write these IDs as null.",
+ version: "Doesn't seem to anymore",
+ },
+ {
+ id: curdevSync,
+ fxaDeviceId: curdev,
+ os: "emacs",
+ version: "22",
+ },
+ {
+ id: Utils.makeGUID(),
+ fxaDeviceId: Utils.makeGUID(),
+ os: "not part of the fxa device set at all",
+ version: "foo bar baz",
+ },
+ // keep0 intentionally omitted.
+ ];
+ t.getClientsEngineRecords = () => clientInfo;
+ let devInfo = t.updateFxaDevices(fxaDevices);
+ equal(devInfo.deviceID, t.sanitizeFxaDeviceId(curdev));
+ for (let d of devInfo.devices) {
+ ok(d.id.startsWith("So clean:"));
+ if (d.syncID) {
+ ok(d.syncID.startsWith("So clean:"));
+ }
+ }
+ equal(devInfo.devices.length, 4);
+ let k0 = devInfo.devices.find(d => d.id == t.sanitizeFxaDeviceId(keep0));
+ let k1 = devInfo.devices.find(d => d.id == t.sanitizeFxaDeviceId(keep1));
+ let k2 = devInfo.devices.find(d => d.id == t.sanitizeFxaDeviceId(keep2));
+
+ deepEqual(k0, {
+ id: t.sanitizeFxaDeviceId(keep0),
+ type: "mobile",
+ os: undefined,
+ version: undefined,
+ syncID: undefined,
+ });
+ deepEqual(k1, {
+ id: t.sanitizeFxaDeviceId(keep1),
+ type: "desktop",
+ os: clientInfo[0].os,
+ version: clientInfo[0].version,
+ syncID: t.sanitizeFxaDeviceId(keep1Sync),
+ });
+ deepEqual(k2, {
+ id: t.sanitizeFxaDeviceId(keep2),
+ type: "desktop",
+ os: clientInfo[1].os,
+ version: clientInfo[1].version,
+ syncID: t.sanitizeFxaDeviceId(keep2Sync),
+ });
+ let newCurId = Utils.makeGUID();
+ // Update the ID
+ fxaDevices[0].id = newCurId;
+
+ let keep3 = Utils.makeGUID();
+ fxaDevices.push({
+ id: keep3,
+ isCurrentDevice: false,
+ lastAccessTime: Date.now() - 1000 * 60 * 60 * 24 * 1,
+ pushEndpointExpired: false,
+ type: "desktop",
+ name: "valid 4",
+ });
+ devInfo = t.updateFxaDevices(fxaDevices);
+
+ let afterSubmit = [keep0, keep1, keep2, keep3, newCurId]
+ .map(id => t.sanitizeFxaDeviceId(id))
+ .sort();
+ deepEqual(devInfo.devices.map(d => d.id).sort(), afterSubmit);
+
+ // Reset this, as our override doesn't check for sync being enabled.
+ t.sanitizeFxaDeviceId = oldSanitizeFxaDeviceId;
+ syncEnabled = false;
+ fxAccounts.telemetry._setHashedUID(false);
+ devInfo = t.updateFxaDevices(fxaDevices);
+ equal(devInfo.deviceID, undefined);
+ equal(devInfo.devices.length, 5);
+ for (let d of devInfo.devices) {
+ equal(d.os, undefined);
+ equal(d.version, undefined);
+ equal(d.syncID, undefined);
+ // Type should still be present.
+ notEqual(d.type, undefined);
+ }
+ } finally {
+ t.getClientsEngineRecords = oldGetClientsEngineRecords;
+ t.getFxaDevices = oldGetFxaDevices;
+ t.syncIsEnabled = oldSyncIsEnabled;
+ t.sanitizeFxaDeviceId = oldSanitizeFxaDeviceId;
+ }
+});
+
+add_task(async function test_sanitize_fxa_device_id() {
+ let t = get_sync_test_telemetry();
+ fxAccounts.telemetry._setHashedUID(false);
+ sinon.stub(t, "syncIsEnabled").callsFake(() => true);
+ const rawDeviceId = "raw one two three";
+ try {
+ equal(t.sanitizeFxaDeviceId(rawDeviceId), null);
+ fxAccounts.telemetry._setHashedUID("mock uid");
+ const sanitizedDeviceId = t.sanitizeFxaDeviceId(rawDeviceId);
+ ok(sanitizedDeviceId);
+ notEqual(sanitizedDeviceId, rawDeviceId);
+ } finally {
+ t.syncIsEnabled.restore();
+ fxAccounts.telemetry._setHashedUID(false);
+ }
+});
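+
+// A rough sketch of the sanitization exercised above (an assumption about
+// its shape, not the exact production code): the raw device ID is hashed
+// together with the per-user hashed UID so device IDs can't be correlated
+// across users, along the lines of
+//   hex(SHA256(rawDeviceId + hashedUID)).slice(0, 32)
+// and with no hashed UID available, sanitizeFxaDeviceId() returns null, as
+// the first assertion shows.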
+
+add_task(async function test_no_node_type() {
+ let server = sync_httpd_setup({});
+ await configureIdentity(null, server);
+
+ await sync_and_validate_telem(ping => {
+ ok(ping.syncNodeType === undefined);
+ }, true);
+ await promiseStopServer(server);
+});
+
+add_task(async function test_node_type() {
+ Service.identity.logout();
+ let server = sync_httpd_setup({});
+ await configureIdentity({ node_type: "the-node-type" }, server);
+
+ await sync_and_validate_telem(ping => {
+ equal(ping.syncNodeType, "the-node-type");
+ }, true);
+ await promiseStopServer(server);
+});
+
+add_task(async function test_node_type_change() {
+ let pingPromise = wait_for_pings(2);
+
+ Service.identity.logout();
+ let server = sync_httpd_setup({});
+ await configureIdentity({ node_type: "first-node-type" }, server);
+ // Default to submitting each hour - we should still submit on node change.
+ let telem = get_sync_test_telemetry();
+ telem.submissionInterval = 60 * 60 * 1000;
+  // Reset the node type from the previous test, or our first sync will submit.
+ telem.lastSyncNodeType = null;
+ // do 2 syncs with the same node type.
+ await Service.sync();
+ await Service.sync();
+ // then another with a different node type.
+ Service.identity.logout();
+ await configureIdentity({ node_type: "second-node-type" }, server);
+ await Service.sync();
+ telem.finish();
+
+ let pings = await pingPromise;
+ equal(pings.length, 2);
+ equal(pings[0].syncs.length, 2, "2 syncs in first ping");
+ equal(pings[0].syncNodeType, "first-node-type");
+ equal(pings[1].syncs.length, 1, "1 sync in second ping");
+ equal(pings[1].syncNodeType, "second-node-type");
+ await promiseStopServer(server);
+});
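+
+// The key behaviour: even with submissionInterval set to a full hour, a
+// change in syncNodeType forces the accumulated syncs to be flushed into
+// their own ping, so each ping carries exactly one node type.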
+
+add_task(async function test_ids() {
+ let telem = get_sync_test_telemetry();
+ Assert.ok(!telem._shouldSubmitForDataChange());
+ fxAccounts.telemetry._setHashedUID("new_uid");
+ Assert.ok(telem._shouldSubmitForDataChange());
+ telem.maybeSubmitForDataChange();
+ // now it's been submitted the new uid is current.
+ Assert.ok(!telem._shouldSubmitForDataChange());
+});
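+
+// _shouldSubmitForDataChange() presumably compares the current hashed UID
+// against the one recorded for the ping being built, and
+// maybeSubmitForDataChange() submits the pending ping and adopts the new
+// value, which is why the final assertion flips back to false.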
+
+add_task(async function test_deletion_request_ping() {
+ async function assertRecordedSyncDeviceID(expected) {
+ // The scalar gets updated asynchronously, so wait a tick before checking.
+ await Promise.resolve();
+ const scalars =
+ Services.telemetry.getSnapshotForScalars("deletion-request").parent || {};
+ equal(scalars["deletion.request.sync_device_id"], expected);
+ }
+
+ const MOCK_HASHED_UID = "00112233445566778899aabbccddeeff";
+ const MOCK_DEVICE_ID1 = "ffeeddccbbaa99887766554433221100";
+ const MOCK_DEVICE_ID2 = "aabbccddeeff99887766554433221100";
+
+ // Calculated by hand using SHA256(DEVICE_ID + HASHED_UID)[:32]
+ const SANITIZED_DEVICE_ID1 = "dd7c845006df9baa1c6d756926519c8c";
+ const SANITIZED_DEVICE_ID2 = "0d06919a736fc029007e1786a091882c";
+
+ let currentDeviceID = null;
+ sinon.stub(fxAccounts.device, "getLocalId").callsFake(() => {
+ return Promise.resolve(currentDeviceID);
+ });
+ let telem = get_sync_test_telemetry();
+ sinon.stub(telem, "isProductionSyncUser").callsFake(() => true);
+ fxAccounts.telemetry._setHashedUID(false);
+ try {
+ // The scalar should start out undefined, since no user is actually logged in.
+ await assertRecordedSyncDeviceID(undefined);
+
+ // If we start up without knowing the hashed UID, it should stay undefined.
+ telem.observe(null, "weave:service:ready");
+ await assertRecordedSyncDeviceID(undefined);
+
+ // But now let's say we've discovered the hashed UID from the server.
+ fxAccounts.telemetry._setHashedUID(MOCK_HASHED_UID);
+ currentDeviceID = MOCK_DEVICE_ID1;
+
+ // Now when we load up, we'll record the sync device id.
+ telem.observe(null, "weave:service:ready");
+ await assertRecordedSyncDeviceID(SANITIZED_DEVICE_ID1);
+
+ // When the device-id changes we'll update it.
+ currentDeviceID = MOCK_DEVICE_ID2;
+ telem.observe(null, "fxaccounts:new_device_id");
+ await assertRecordedSyncDeviceID(SANITIZED_DEVICE_ID2);
+
+ // When the user signs out we'll clear it.
+ telem.observe(null, "fxaccounts:onlogout");
+ await assertRecordedSyncDeviceID("");
+ } finally {
+ fxAccounts.telemetry._setHashedUID(false);
+ telem.isProductionSyncUser.restore();
+ fxAccounts.device.getLocalId.restore();
+ }
+});
diff --git a/services/sync/tests/unit/test_tracker_addChanged.js b/services/sync/tests/unit/test_tracker_addChanged.js
new file mode 100644
index 0000000000..7f510794fc
--- /dev/null
+++ b/services/sync/tests/unit/test_tracker_addChanged.js
@@ -0,0 +1,59 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+add_task(async function test_tracker_basics() {
+ let tracker = new LegacyTracker("Tracker", Service);
+
+ let id = "the_id!";
+
+ _("Make sure nothing exists yet..");
+ let changes = await tracker.getChangedIDs();
+ Assert.equal(changes[id], null);
+
+ _("Make sure adding of time 0 works");
+ await tracker.addChangedID(id, 0);
+ changes = await tracker.getChangedIDs();
+ Assert.equal(changes[id], 0);
+
+ _("A newer time will replace the old 0");
+ await tracker.addChangedID(id, 10);
+ changes = await tracker.getChangedIDs();
+ Assert.equal(changes[id], 10);
+
+ _("An older time will not replace the newer 10");
+ await tracker.addChangedID(id, 5);
+ changes = await tracker.getChangedIDs();
+ Assert.equal(changes[id], 10);
+
+ _("Adding without time defaults to current time");
+ await tracker.addChangedID(id);
+ changes = await tracker.getChangedIDs();
+ Assert.ok(changes[id] > 10);
+});
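+
+// In short: addChangedID(id, when) keeps the highest timestamp seen for an
+// id, and omitting `when` stamps the entry with the current time (the final
+// assertion only checks that it is newer than 10).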
+
+add_task(async function test_tracker_persistence() {
+ let tracker = new LegacyTracker("Tracker", Service);
+ let id = "abcdef";
+
+ let promiseSave = new Promise((resolve, reject) => {
+ let save = tracker._storage._save;
+ tracker._storage._save = function () {
+ save.call(tracker._storage).then(resolve, reject);
+ };
+ });
+
+ await tracker.addChangedID(id, 5);
+
+ await promiseSave;
+
+ _("IDs saved.");
+ const changes = await tracker.getChangedIDs();
+ Assert.equal(5, changes[id]);
+
+ let json = await Utils.jsonLoad(["changes", "tracker"], tracker);
+ Assert.equal(5, json[id]);
+});
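+
+// The jsonLoad(["changes", "tracker"], ...) call above implies the tracker
+// persists its changed IDs as JSON under the profile's weave directory,
+// presumably at weave/changes/tracker.json.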
diff --git a/services/sync/tests/unit/test_uistate.js b/services/sync/tests/unit/test_uistate.js
new file mode 100644
index 0000000000..cb1ff1979e
--- /dev/null
+++ b/services/sync/tests/unit/test_uistate.js
@@ -0,0 +1,324 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+const { UIState } = ChromeUtils.importESModule(
+ "resource://services-sync/UIState.sys.mjs"
+);
+
+const UIStateInternal = UIState._internal;
+
+add_task(async function test_isReady_unconfigured() {
+ UIState.reset();
+
+ let refreshState = sinon.spy(UIStateInternal, "refreshState");
+
+ // On the first call, returns false
+ // Does trigger a refresh of the state - even though services.sync.username
+ // is undefined we still need to check the account state.
+ ok(!UIState.isReady());
+  // refreshState is called when idle - so only check after idle.
+ await new Promise(resolve => {
+ Services.tm.idleDispatchToMainThread(resolve);
+ });
+ ok(refreshState.called);
+ refreshState.resetHistory();
+
+  // On subsequent calls, just return true without triggering a refresh.
+ ok(UIState.isReady());
+ ok(!refreshState.called);
+
+ refreshState.restore();
+});
+
+add_task(async function test_isReady_signedin() {
+ UIState.reset();
+ Services.prefs.setStringPref("services.sync.username", "foo");
+
+ let refreshState = sinon.spy(UIStateInternal, "refreshState");
+
+ // On the first call, returns false and triggers a refresh of the state
+ ok(!UIState.isReady());
+ await new Promise(resolve => {
+ Services.tm.idleDispatchToMainThread(resolve);
+ });
+ ok(refreshState.calledOnce);
+ refreshState.resetHistory();
+
+  // On subsequent calls, just return true without triggering a refresh.
+ ok(UIState.isReady());
+ ok(!refreshState.called);
+
+ refreshState.restore();
+});
+
+add_task(async function test_refreshState_signedin() {
+ UIState.reset();
+ const fxAccountsOrig = UIStateInternal.fxAccounts;
+
+ const now = new Date().toString();
+ Services.prefs.setStringPref("services.sync.lastSync", now);
+ UIStateInternal.syncing = false;
+
+ UIStateInternal.fxAccounts = {
+ getSignedInUser: () =>
+ Promise.resolve({
+ verified: true,
+ uid: "123",
+ email: "foo@bar.com",
+ displayName: "Foo Bar",
+ avatar: "https://foo/bar",
+ }),
+ hasLocalSession: () => Promise.resolve(true),
+ };
+
+ let state = await UIState.refresh();
+
+ equal(state.status, UIState.STATUS_SIGNED_IN);
+ equal(state.uid, "123");
+ equal(state.email, "foo@bar.com");
+ equal(state.displayName, "Foo Bar");
+ equal(state.avatarURL, "https://foo/bar");
+ equal(state.lastSync, now);
+ equal(state.syncing, false);
+
+ UIStateInternal.fxAccounts = fxAccountsOrig;
+});
+
+add_task(async function test_refreshState_syncButNoFxA() {
+ UIState.reset();
+ const fxAccountsOrig = UIStateInternal.fxAccounts;
+
+ const now = new Date().toString();
+ Services.prefs.setStringPref("services.sync.lastSync", now);
+ Services.prefs.setStringPref("services.sync.username", "test@test.com");
+ UIStateInternal.syncing = false;
+
+ UIStateInternal.fxAccounts = {
+ getSignedInUser: () => Promise.resolve(null),
+ };
+
+ let state = await UIState.refresh();
+
+ equal(state.status, UIState.STATUS_LOGIN_FAILED);
+ equal(state.uid, undefined);
+ equal(state.email, "test@test.com");
+ equal(state.displayName, undefined);
+ equal(state.avatarURL, undefined);
+ equal(state.lastSync, undefined); // only set when STATUS_SIGNED_IN.
+ equal(state.syncing, false);
+
+ UIStateInternal.fxAccounts = fxAccountsOrig;
+ Services.prefs.clearUserPref("services.sync.lastSync");
+ Services.prefs.clearUserPref("services.sync.username");
+});
+
+add_task(async function test_refreshState_signedin_profile_unavailable() {
+ UIState.reset();
+ const fxAccountsOrig = UIStateInternal.fxAccounts;
+
+ const now = new Date().toString();
+ Services.prefs.setStringPref("services.sync.lastSync", now);
+ Services.prefs.setStringPref("services.sync.username", "test@test.com");
+ UIStateInternal.syncing = false;
+
+ UIStateInternal.fxAccounts = {
+ getSignedInUser: () =>
+ Promise.resolve({ verified: true, uid: "123", email: "foo@bar.com" }),
+ hasLocalSession: () => Promise.resolve(true),
+ _internal: {
+ profile: {
+ getProfile: () => {
+ return Promise.reject(new Error("Profile unavailable"));
+ },
+ },
+ },
+ };
+
+ let state = await UIState.refresh();
+
+ equal(state.status, UIState.STATUS_SIGNED_IN);
+ equal(state.uid, "123");
+ equal(state.email, "foo@bar.com");
+ equal(state.displayName, undefined);
+ equal(state.avatarURL, undefined);
+ equal(state.lastSync, now);
+ equal(state.syncing, false);
+
+ UIStateInternal.fxAccounts = fxAccountsOrig;
+ Services.prefs.clearUserPref("services.sync.lastSync");
+ Services.prefs.clearUserPref("services.sync.username");
+});
+
+add_task(async function test_refreshState_unverified() {
+ UIState.reset();
+ const fxAccountsOrig = UIStateInternal.fxAccounts;
+
+ UIStateInternal.fxAccounts = {
+ getSignedInUser: () =>
+ Promise.resolve({ verified: false, uid: "123", email: "foo@bar.com" }),
+ hasLocalSession: () => Promise.resolve(true),
+ };
+
+ let state = await UIState.refresh();
+
+ equal(state.status, UIState.STATUS_NOT_VERIFIED);
+ equal(state.uid, "123");
+ equal(state.email, "foo@bar.com");
+ equal(state.displayName, undefined);
+ equal(state.avatarURL, undefined);
+ equal(state.lastSync, undefined);
+
+ UIStateInternal.fxAccounts = fxAccountsOrig;
+});
+
+add_task(async function test_refreshState_unverified_nosession() {
+ UIState.reset();
+ const fxAccountsOrig = UIStateInternal.fxAccounts;
+
+ UIStateInternal.fxAccounts = {
+ getSignedInUser: () =>
+ Promise.resolve({ verified: false, uid: "123", email: "foo@bar.com" }),
+ hasLocalSession: () => Promise.resolve(false),
+ };
+
+ let state = await UIState.refresh();
+
+ // No session should "win" over the unverified state.
+ equal(state.status, UIState.STATUS_LOGIN_FAILED);
+ equal(state.uid, "123");
+ equal(state.email, "foo@bar.com");
+ equal(state.displayName, undefined);
+ equal(state.avatarURL, undefined);
+ equal(state.lastSync, undefined);
+
+ UIStateInternal.fxAccounts = fxAccountsOrig;
+});
+
+add_task(async function test_refreshState_loginFailed() {
+ UIState.reset();
+ const fxAccountsOrig = UIStateInternal.fxAccounts;
+
+ let loginFailed = sinon.stub(UIStateInternal, "_loginFailed");
+ loginFailed.returns(true);
+
+ UIStateInternal.fxAccounts = {
+ getSignedInUser: () =>
+ Promise.resolve({ verified: true, uid: "123", email: "foo@bar.com" }),
+ };
+
+ let state = await UIState.refresh();
+
+ equal(state.status, UIState.STATUS_LOGIN_FAILED);
+ equal(state.uid, "123");
+ equal(state.email, "foo@bar.com");
+ equal(state.displayName, undefined);
+ equal(state.avatarURL, undefined);
+ equal(state.lastSync, undefined);
+
+ loginFailed.restore();
+ UIStateInternal.fxAccounts = fxAccountsOrig;
+});
+
+add_task(async function test_observer_refreshState() {
+ let refreshState = sinon.spy(UIStateInternal, "refreshState");
+
+ let shouldRefresh = [
+ "weave:service:login:got-hashed-id",
+ "weave:service:login:error",
+ "weave:service:ready",
+ "fxaccounts:onverified",
+ "fxaccounts:onlogin",
+ "fxaccounts:onlogout",
+ "fxaccounts:profilechange",
+ ];
+
+ for (let topic of shouldRefresh) {
+ let uiUpdateObserved = observeUIUpdate();
+ Services.obs.notifyObservers(null, topic);
+ await uiUpdateObserved;
+ ok(refreshState.calledOnce);
+ refreshState.resetHistory();
+ }
+
+ refreshState.restore();
+});
+
+// Drive the UIState in a configured state.
+async function configureUIState(syncing, lastSync = new Date()) {
+ UIState.reset();
+ const fxAccountsOrig = UIStateInternal.fxAccounts;
+
+ UIStateInternal._syncing = syncing;
+ Services.prefs.setStringPref("services.sync.lastSync", lastSync.toString());
+ Services.prefs.setStringPref("services.sync.username", "test@test.com");
+
+ UIStateInternal.fxAccounts = {
+ getSignedInUser: () =>
+ Promise.resolve({ verified: true, uid: "123", email: "foo@bar.com" }),
+ hasLocalSession: () => Promise.resolve(true),
+ };
+ await UIState.refresh();
+ UIStateInternal.fxAccounts = fxAccountsOrig;
+}
+
+add_task(async function test_syncStarted() {
+ await configureUIState(false);
+
+ const oldState = Object.assign({}, UIState.get());
+ ok(!oldState.syncing);
+
+ let uiUpdateObserved = observeUIUpdate();
+ Services.obs.notifyObservers(null, "weave:service:sync:start");
+ await uiUpdateObserved;
+
+ const newState = Object.assign({}, UIState.get());
+ ok(newState.syncing);
+});
+
+add_task(async function test_syncFinished() {
+ let yesterday = new Date();
+ yesterday.setDate(yesterday.getDate() - 1);
+ await configureUIState(true, yesterday);
+
+ const oldState = Object.assign({}, UIState.get());
+ ok(oldState.syncing);
+
+ let uiUpdateObserved = observeUIUpdate();
+ Services.prefs.setStringPref("services.sync.lastSync", new Date().toString());
+ Services.obs.notifyObservers(null, "weave:service:sync:finish");
+ await uiUpdateObserved;
+
+ const newState = Object.assign({}, UIState.get());
+ ok(!newState.syncing);
+ ok(new Date(newState.lastSync) > new Date(oldState.lastSync));
+});
+
+add_task(async function test_syncError() {
+ let yesterday = new Date();
+ yesterday.setDate(yesterday.getDate() - 1);
+ await configureUIState(true, yesterday);
+
+ const oldState = Object.assign({}, UIState.get());
+ ok(oldState.syncing);
+
+ let uiUpdateObserved = observeUIUpdate();
+ Services.obs.notifyObservers(null, "weave:service:sync:error");
+ await uiUpdateObserved;
+
+ const newState = Object.assign({}, UIState.get());
+ ok(!newState.syncing);
+ deepEqual(newState.lastSync, oldState.lastSync);
+});
+
+function observeUIUpdate() {
+ return new Promise(resolve => {
+ let obs = (aSubject, aTopic, aData) => {
+ Services.obs.removeObserver(obs, aTopic);
+ const state = UIState.get();
+ resolve(state);
+ };
+ Services.obs.addObserver(obs, UIState.ON_UPDATE);
+ });
+}
diff --git a/services/sync/tests/unit/test_utils_catch.js b/services/sync/tests/unit/test_utils_catch.js
new file mode 100644
index 0000000000..590d04527f
--- /dev/null
+++ b/services/sync/tests/unit/test_utils_catch.js
@@ -0,0 +1,119 @@
+const { Service } = ChromeUtils.importESModule(
+ "resource://services-sync/service.sys.mjs"
+);
+
+add_task(async function run_test() {
+ _("Make sure catch when copied to an object will correctly catch stuff");
+ let ret, rightThis, didCall, didThrow, wasCovfefe, wasLocked;
+ let obj = {
+ _catch: Utils.catch,
+ _log: {
+ debug(str) {
+ didThrow = str.search(/^Exception/) == 0;
+ },
+ info(str) {
+ wasLocked = str.indexOf("Cannot start sync: already syncing?") == 0;
+ },
+ },
+
+ func() {
+ return this._catch(async function () {
+ rightThis = this == obj;
+ didCall = true;
+ return 5;
+ })();
+ },
+
+ throwy() {
+ return this._catch(async function () {
+ rightThis = this == obj;
+ didCall = true;
+ throw new Error("covfefe");
+ })();
+ },
+
+ callbacky() {
+ return this._catch(
+ async function () {
+ rightThis = this == obj;
+ didCall = true;
+ throw new Error("covfefe");
+ },
+ async function (ex) {
+ wasCovfefe = ex && ex.message == "covfefe";
+ }
+ )();
+ },
+
+ lockedy() {
+ return this._catch(async function () {
+ rightThis = this == obj;
+ didCall = true;
+ Utils.throwLockException(null);
+ })();
+ },
+
+ lockedy_chained() {
+ return this._catch(async function () {
+ rightThis = this == obj;
+ didCall = true;
+ Utils.throwLockException(null);
+ })();
+ },
+ };
+
+ _("Make sure a normal call will call and return");
+ rightThis = didCall = didThrow = wasLocked = false;
+ ret = await obj.func();
+ Assert.equal(ret, 5);
+ Assert.ok(rightThis);
+ Assert.ok(didCall);
+ Assert.ok(!didThrow);
+ Assert.equal(wasCovfefe, undefined);
+ Assert.ok(!wasLocked);
+
+ _(
+ "Make sure catch/throw results in debug call and caller doesn't need to handle exception"
+ );
+ rightThis = didCall = didThrow = wasLocked = false;
+ ret = await obj.throwy();
+ Assert.equal(ret, undefined);
+ Assert.ok(rightThis);
+ Assert.ok(didCall);
+ Assert.ok(didThrow);
+ Assert.equal(wasCovfefe, undefined);
+ Assert.ok(!wasLocked);
+
+ _("Test callback for exception testing.");
+ rightThis = didCall = didThrow = wasLocked = false;
+ ret = await obj.callbacky();
+ Assert.equal(ret, undefined);
+ Assert.ok(rightThis);
+ Assert.ok(didCall);
+ Assert.ok(didThrow);
+ Assert.ok(wasCovfefe);
+ Assert.ok(!wasLocked);
+
+ _("Test the lock-aware catch that Service uses.");
+ obj._catch = Service._catch;
+ rightThis = didCall = didThrow = wasLocked = false;
+ wasCovfefe = undefined;
+ ret = await obj.lockedy();
+ Assert.equal(ret, undefined);
+ Assert.ok(rightThis);
+ Assert.ok(didCall);
+ Assert.ok(didThrow);
+ Assert.equal(wasCovfefe, undefined);
+ Assert.ok(wasLocked);
+
+ _("Test the lock-aware catch that Service uses with a chained promise.");
+ rightThis = didCall = didThrow = wasLocked = false;
+ wasCovfefe = undefined;
+ ret = await obj.lockedy_chained();
+ Assert.equal(ret, undefined);
+ Assert.ok(rightThis);
+ Assert.ok(didCall);
+ Assert.ok(didThrow);
+ Assert.equal(wasCovfefe, undefined);
+ Assert.ok(wasLocked);
+});
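+
+// For reference, Utils.catch behaves roughly like the sketch below (an
+// assumed shape inferred from the assertions above, not the exact source;
+// the lock-aware branch corresponds to the Service._catch variant):
+//
+//   catch(func, exceptionCallback) {
+//     return async function WrappedCatch() {
+//       try {
+//         return await func.call(this);
+//       } catch (ex) {
+//         if (Utils.isLockException(ex)) {
+//           this._log.info("Cannot start sync: already syncing?");
+//         } else {
+//           this._log.debug("Exception calling anonymous function: " + ex);
+//         }
+//         if (exceptionCallback) {
+//           return exceptionCallback.call(this, ex);
+//         }
+//       }
+//     };
+//   }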
diff --git a/services/sync/tests/unit/test_utils_deepEquals.js b/services/sync/tests/unit/test_utils_deepEquals.js
new file mode 100644
index 0000000000..218cc21b72
--- /dev/null
+++ b/services/sync/tests/unit/test_utils_deepEquals.js
@@ -0,0 +1,51 @@
+_("Make sure Utils.deepEquals correctly finds items that are deeply equal");
+
+Services.prefs.setBoolPref("security.allow_eval_with_system_principal", true);
+registerCleanupFunction(() => {
+ Services.prefs.clearUserPref("security.allow_eval_with_system_principal");
+});
+
+function run_test() {
+ let data =
+ '[NaN, undefined, null, true, false, Infinity, 0, 1, "a", "b", {a: 1}, {a: "a"}, [{a: 1}], [{a: true}], {a: 1, b: 2}, [1, 2], [1, 2, 3]]';
+ _("Generating two copies of data:", data);
+ /* eslint-disable no-eval */
+ let d1 = eval(data);
+ let d2 = eval(data);
+ /* eslint-enable no-eval */
+
+ d1.forEach(function (a) {
+ _("Testing", a, typeof a, JSON.stringify([a]));
+ let numMatch = 0;
+
+ d2.forEach(function (b) {
+ if (Utils.deepEquals(a, b)) {
+ numMatch++;
+ _("Found a match", b, typeof b, JSON.stringify([b]));
+ }
+ });
+
+ let expect = 1;
+ if (isNaN(a) && typeof a == "number") {
+ expect = 0;
+ _("Checking NaN should result in no matches");
+ }
+
+ _("Making sure we found the correct # match:", expect);
+ _("Actual matches:", numMatch);
+ Assert.equal(numMatch, expect);
+ });
+
+ _("Make sure adding undefined properties doesn't affect equalness");
+ let a = {};
+ let b = { a: undefined };
+ Assert.ok(Utils.deepEquals(a, b));
+ a.b = 5;
+ Assert.ok(!Utils.deepEquals(a, b));
+ b.b = 5;
+ Assert.ok(Utils.deepEquals(a, b));
+ a.c = undefined;
+ Assert.ok(Utils.deepEquals(a, b));
+ b.d = undefined;
+ Assert.ok(Utils.deepEquals(a, b));
+}
diff --git a/services/sync/tests/unit/test_utils_deferGetSet.js b/services/sync/tests/unit/test_utils_deferGetSet.js
new file mode 100644
index 0000000000..6db812b844
--- /dev/null
+++ b/services/sync/tests/unit/test_utils_deferGetSet.js
@@ -0,0 +1,50 @@
+_(
+ "Make sure various combinations of deferGetSet arguments correctly defer getting/setting properties to another object"
+);
+
+function run_test() {
+ let base = function () {};
+ base.prototype = {
+ dst: {},
+
+ get a() {
+ return "a";
+ },
+ set b(val) {
+ this.dst.b = val + "!!!";
+ },
+ };
+ let src = new base();
+
+ _("get/set a single property");
+ Utils.deferGetSet(base, "dst", "foo");
+ src.foo = "bar";
+ Assert.equal(src.dst.foo, "bar");
+ Assert.equal(src.foo, "bar");
+
+ _("editing the target also updates the source");
+ src.dst.foo = "baz";
+ Assert.equal(src.dst.foo, "baz");
+ Assert.equal(src.foo, "baz");
+
+ _("handle multiple properties");
+ Utils.deferGetSet(base, "dst", ["p1", "p2"]);
+ src.p1 = "v1";
+ src.p2 = "v2";
+ Assert.equal(src.p1, "v1");
+ Assert.equal(src.dst.p1, "v1");
+ Assert.equal(src.p2, "v2");
+ Assert.equal(src.dst.p2, "v2");
+
+ _("make sure existing getter keeps its functionality");
+ Utils.deferGetSet(base, "dst", "a");
+ src.a = "not a";
+ Assert.equal(src.dst.a, "not a");
+ Assert.equal(src.a, "a");
+
+ _("make sure existing setter keeps its functionality");
+ Utils.deferGetSet(base, "dst", "b");
+ src.b = "b";
+ Assert.equal(src.dst.b, "b!!!");
+ Assert.equal(src.b, "b!!!");
+}
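+
+// A minimal sketch of what deferGetSet is expected to do (assumed shape,
+// inferred from the assertions above, not the exact source): define
+// forwarding accessors on the prototype, keeping any getter or setter the
+// prototype already defines and only filling in the missing half:
+//
+//   for (let prop of [].concat(props)) {
+//     let existing = Object.getOwnPropertyDescriptor(base.prototype, prop);
+//     Object.defineProperty(base.prototype, prop, {
+//       get: (existing && existing.get) ||
+//         function () { return this[dst][prop]; },
+//       set: (existing && existing.set) ||
+//         function (val) { this[dst][prop] = val; },
+//     });
+//   }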
diff --git a/services/sync/tests/unit/test_utils_json.js b/services/sync/tests/unit/test_utils_json.js
new file mode 100644
index 0000000000..5bf26b2361
--- /dev/null
+++ b/services/sync/tests/unit/test_utils_json.js
@@ -0,0 +1,95 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { FileUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/FileUtils.sys.mjs"
+);
+
+add_task(async function test_roundtrip() {
+ _("Do a simple write of an array to json and read");
+ await Utils.jsonSave("foo", {}, ["v1", "v2"]);
+
+ let foo = await Utils.jsonLoad("foo", {});
+ Assert.equal(typeof foo, "object");
+ Assert.equal(foo.length, 2);
+ Assert.equal(foo[0], "v1");
+ Assert.equal(foo[1], "v2");
+});
+
+add_task(async function test_string() {
+ _("Try saving simple strings");
+ await Utils.jsonSave("str", {}, "hi");
+
+ let str = await Utils.jsonLoad("str", {});
+ Assert.equal(typeof str, "string");
+ Assert.equal(str.length, 2);
+ Assert.equal(str[0], "h");
+ Assert.equal(str[1], "i");
+});
+
+add_task(async function test_number() {
+ _("Try saving a number");
+ await Utils.jsonSave("num", {}, 42);
+
+ let num = await Utils.jsonLoad("num", {});
+ Assert.equal(typeof num, "number");
+ Assert.equal(num, 42);
+});
+
+add_task(async function test_nonexistent_file() {
+ _("Try loading a non-existent file.");
+ let val = await Utils.jsonLoad("non-existent", {});
+ Assert.equal(val, undefined);
+});
+
+add_task(async function test_save_logging() {
+ _("Verify that writes are logged.");
+ let trace;
+ await Utils.jsonSave(
+ "log",
+ {
+ _log: {
+ trace(msg) {
+ trace = msg;
+ },
+ },
+ },
+ "hi"
+ );
+ Assert.ok(!!trace);
+});
+
+add_task(async function test_load_logging() {
+ _("Verify that reads and read errors are logged.");
+
+ // Write a file with some invalid JSON
+ let file = await IOUtils.getFile(PathUtils.profileDir, "weave", "log.json");
+ let fos = Cc["@mozilla.org/network/file-output-stream;1"].createInstance(
+ Ci.nsIFileOutputStream
+ );
+ let flags =
+ FileUtils.MODE_WRONLY | FileUtils.MODE_CREATE | FileUtils.MODE_TRUNCATE;
+ fos.init(file, flags, FileUtils.PERMS_FILE, fos.DEFER_OPEN);
+ let stream = Cc["@mozilla.org/intl/converter-output-stream;1"].createInstance(
+ Ci.nsIConverterOutputStream
+ );
+ stream.init(fos, "UTF-8");
+ stream.writeString("invalid json!");
+ stream.close();
+
+ let trace, debug;
+ let obj = {
+ _log: {
+ trace(msg) {
+ trace = msg;
+ },
+ debug(msg) {
+ debug = msg;
+ },
+ },
+ };
+ let val = await Utils.jsonLoad("log", obj);
+ Assert.ok(!val);
+ Assert.ok(!!trace);
+ Assert.ok(!!debug);
+});
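+
+// Note the contract being checked: on malformed JSON, Utils.jsonLoad logs
+// the failure (here via the injected trace/debug methods) and resolves to
+// undefined rather than rejecting.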
diff --git a/services/sync/tests/unit/test_utils_keyEncoding.js b/services/sync/tests/unit/test_utils_keyEncoding.js
new file mode 100644
index 0000000000..30a8a4f2aa
--- /dev/null
+++ b/services/sync/tests/unit/test_utils_keyEncoding.js
@@ -0,0 +1,23 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+function run_test() {
+ Assert.equal(
+ Utils.encodeKeyBase32("foobarbafoobarba"),
+ "mzxw6ytb9jrgcztpn5rgc4tcme"
+ );
+ Assert.equal(
+ Utils.decodeKeyBase32("mzxw6ytb9jrgcztpn5rgc4tcme"),
+ "foobarbafoobarba"
+ );
+ Assert.equal(
+ Utils.encodeKeyBase32(
+ "\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01"
+ ),
+ "aeaqcaibaeaqcaibaeaqcaibae"
+ );
+ Assert.equal(
+ Utils.decodeKeyBase32("aeaqcaibaeaqcaibaeaqcaibae"),
+ "\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01"
+ );
+}
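+
+// The '9' in the expected output is the tell: Sync's "friendly" base32
+// appears to be lowercase RFC 4648 base32 with 'l' mapped to '8' and 'o'
+// mapped to '9' to avoid visually ambiguous characters, and decodeKeyBase32
+// reverses that mapping before decoding.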
diff --git a/services/sync/tests/unit/test_utils_lock.js b/services/sync/tests/unit/test_utils_lock.js
new file mode 100644
index 0000000000..71d6486ff9
--- /dev/null
+++ b/services/sync/tests/unit/test_utils_lock.js
@@ -0,0 +1,76 @@
+_("Make sure lock prevents calling with a shared lock");
+
+// Utility that we only use here.
+
+function do_check_begins(thing, startsWith) {
+ if (!(thing && thing.indexOf && thing.indexOf(startsWith) == 0)) {
+ do_throw(thing + " doesn't begin with " + startsWith);
+ }
+}
+
+add_task(async function run_test() {
+ let ret, rightThis, didCall;
+ let state, lockState, lockedState, unlockState;
+ let obj = {
+ _lock: Utils.lock,
+ lock() {
+ lockState = ++state;
+ if (this._locked) {
+ lockedState = ++state;
+ return false;
+ }
+ this._locked = true;
+ return true;
+ },
+ unlock() {
+ unlockState = ++state;
+ this._locked = false;
+ },
+
+ func() {
+ return this._lock("Test utils lock", async function () {
+ rightThis = this == obj;
+ didCall = true;
+ return 5;
+ })();
+ },
+
+ throwy() {
+ return this._lock("Test utils lock throwy", async function () {
+ rightThis = this == obj;
+ didCall = true;
+ return this.throwy();
+ })();
+ },
+ };
+
+ _("Make sure a normal call will call and return");
+ rightThis = didCall = false;
+ state = 0;
+ ret = await obj.func();
+ Assert.equal(ret, 5);
+ Assert.ok(rightThis);
+ Assert.ok(didCall);
+ Assert.equal(lockState, 1);
+ Assert.equal(unlockState, 2);
+ Assert.equal(state, 2);
+
+ _("Make sure code that calls locked code throws");
+ ret = null;
+ rightThis = didCall = false;
+ try {
+ ret = await obj.throwy();
+ do_throw("throwy internal call should have thrown!");
+ } catch (ex) {
+ // Should throw an Error, not a string.
+ do_check_begins(ex.message, "Could not acquire lock");
+ }
+ Assert.equal(ret, null);
+ Assert.ok(rightThis);
+ Assert.ok(didCall);
+ _("Lock should be called twice so state 3 is skipped");
+ Assert.equal(lockState, 4);
+ Assert.equal(lockedState, 5);
+ Assert.equal(unlockState, 6);
+ Assert.equal(state, 6);
+});
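+
+// Roughly what Utils.lock is expected to do (a sketch inferred from the
+// state counters above): call this.lock(), throw an Error starting with
+// "Could not acquire lock" when it returns false, and otherwise run the
+// wrapped function with this.unlock() in a finally block. That is why the
+// re-entrant call bumps lockState twice before unlock runs once.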
diff --git a/services/sync/tests/unit/test_utils_makeGUID.js b/services/sync/tests/unit/test_utils_makeGUID.js
new file mode 100644
index 0000000000..b1104c1114
--- /dev/null
+++ b/services/sync/tests/unit/test_utils_makeGUID.js
@@ -0,0 +1,44 @@
+const base64url =
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_";
+
+function run_test() {
+ _("Make sure makeGUID makes guids of the right length/characters");
+ _("Create a bunch of guids to make sure they don't conflict");
+ let guids = [];
+ for (let i = 0; i < 1000; i++) {
+ let newGuid = Utils.makeGUID();
+ _("Generated " + newGuid);
+
+ // Verify that the GUID's length is correct, even when it's URL encoded.
+ Assert.equal(newGuid.length, 12);
+ Assert.equal(encodeURIComponent(newGuid).length, 12);
+
+ // Verify that the GUID only contains base64url characters
+ Assert.ok(
+ Array.prototype.every.call(newGuid, function (chr) {
+ return base64url.includes(chr);
+ })
+ );
+
+ // Verify that Utils.checkGUID() correctly identifies them as valid.
+ Assert.ok(Utils.checkGUID(newGuid));
+
+ // Verify uniqueness within our sample of 1000. This could cause random
+ // failures, but they should be extremely rare. Otherwise we'd have a
+ // problem with GUID collisions.
+ Assert.ok(
+ guids.every(function (g) {
+ return g != newGuid;
+ })
+ );
+ guids.push(newGuid);
+ }
+
+ _("Make sure checkGUID fails for invalid GUIDs");
+ Assert.ok(!Utils.checkGUID(undefined));
+ Assert.ok(!Utils.checkGUID(null));
+ Assert.ok(!Utils.checkGUID(""));
+ Assert.ok(!Utils.checkGUID("blergh"));
+ Assert.ok(!Utils.checkGUID("ThisGUIDisWayTooLong"));
+ Assert.ok(!Utils.checkGUID("Invalid!!!!!"));
+}
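+
+// A 12-character base64url string encodes 72 bits, so makeGUID presumably
+// draws 9 random bytes and base64url-encodes them (9 bytes * 8 bits / 6 bits
+// per character = 12 characters, with no padding needed).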
diff --git a/services/sync/tests/unit/test_utils_notify.js b/services/sync/tests/unit/test_utils_notify.js
new file mode 100644
index 0000000000..5c0c3702a6
--- /dev/null
+++ b/services/sync/tests/unit/test_utils_notify.js
@@ -0,0 +1,97 @@
+_("Make sure notify sends out the right notifications");
+add_task(async function run_test() {
+ let ret, rightThis, didCall;
+ let obj = {
+ notify: Utils.notify("foo:"),
+ _log: {
+ trace() {},
+ },
+
+ func() {
+ return this.notify("bar", "baz", async function () {
+ rightThis = this == obj;
+ didCall = true;
+ return 5;
+ })();
+ },
+
+ throwy() {
+ return this.notify("bad", "one", async function () {
+ rightThis = this == obj;
+ didCall = true;
+ throw new Error("covfefe");
+ })();
+ },
+ };
+
+ let state = 0;
+ let makeObs = function (topic) {
+ let obj2 = {
+ observe(subject, obsTopic, data) {
+ this.state = ++state;
+ this.subject = subject;
+ this.topic = obsTopic;
+ this.data = data;
+ },
+ };
+
+ Svc.Obs.add(topic, obj2);
+ return obj2;
+ };
+
+ _("Make sure a normal call will call and return with notifications");
+ rightThis = didCall = false;
+ let fs = makeObs("foo:bar:start");
+ let ff = makeObs("foo:bar:finish");
+ let fe = makeObs("foo:bar:error");
+ ret = await obj.func();
+ Assert.equal(ret, 5);
+ Assert.ok(rightThis);
+ Assert.ok(didCall);
+
+ Assert.equal(fs.state, 1);
+ Assert.equal(fs.subject, undefined);
+ Assert.equal(fs.topic, "foo:bar:start");
+ Assert.equal(fs.data, "baz");
+
+ Assert.equal(ff.state, 2);
+ Assert.equal(ff.subject, 5);
+ Assert.equal(ff.topic, "foo:bar:finish");
+ Assert.equal(ff.data, "baz");
+
+ Assert.equal(fe.state, undefined);
+ Assert.equal(fe.subject, undefined);
+ Assert.equal(fe.topic, undefined);
+ Assert.equal(fe.data, undefined);
+
+ _("Make sure a throwy call will call and throw with notifications");
+ ret = null;
+ rightThis = didCall = false;
+ let ts = makeObs("foo:bad:start");
+ let tf = makeObs("foo:bad:finish");
+ let te = makeObs("foo:bad:error");
+ try {
+ ret = await obj.throwy();
+ do_throw("throwy should have thrown!");
+ } catch (ex) {
+ Assert.equal(ex.message, "covfefe");
+ }
+ Assert.equal(ret, null);
+ Assert.ok(rightThis);
+ Assert.ok(didCall);
+
+ Assert.equal(ts.state, 3);
+ Assert.equal(ts.subject, undefined);
+ Assert.equal(ts.topic, "foo:bad:start");
+ Assert.equal(ts.data, "one");
+
+ Assert.equal(tf.state, undefined);
+ Assert.equal(tf.subject, undefined);
+ Assert.equal(tf.topic, undefined);
+ Assert.equal(tf.data, undefined);
+
+ Assert.equal(te.state, 4);
+ Assert.equal(te.subject.message, "covfefe");
+ Assert.equal(te.topic, "foo:bad:error");
+ Assert.equal(te.data, "one");
+});
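+
+// Summarizing the observed contract of Utils.notify(prefix): the wrapper
+// fires prefix + name + ":start" before the function runs (data = the extra
+// argument), ":finish" with the return value as the subject on success, and
+// ":error" with the thrown exception as the subject on failure, re-throwing
+// the exception to the caller.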
diff --git a/services/sync/tests/unit/test_utils_passphrase.js b/services/sync/tests/unit/test_utils_passphrase.js
new file mode 100644
index 0000000000..fa58086113
--- /dev/null
+++ b/services/sync/tests/unit/test_utils_passphrase.js
@@ -0,0 +1,45 @@
+/* eslint no-tabs:"off" */
+
+function run_test() {
+ _("Normalize passphrase recognizes hyphens.");
+ const pp = "26ect2thczm599m2ffqarbicjq";
+ const hyphenated = "2-6ect2-thczm-599m2-ffqar-bicjq";
+ Assert.equal(Utils.normalizePassphrase(hyphenated), pp);
+
+ _("Skip whitespace.");
+ Assert.equal(
+ "aaaaaaaaaaaaaaaaaaaaaaaaaa",
+ Utils.normalizePassphrase("aaaaaaaaaaaaaaaaaaaaaaaaaa ")
+ );
+ Assert.equal(
+ "aaaaaaaaaaaaaaaaaaaaaaaaaa",
+ Utils.normalizePassphrase(" aaaaaaaaaaaaaaaaaaaaaaaaaa")
+ );
+ Assert.equal(
+ "aaaaaaaaaaaaaaaaaaaaaaaaaa",
+ Utils.normalizePassphrase(" aaaaaaaaaaaaaaaaaaaaaaaaaa ")
+ );
+ Assert.equal(
+ "aaaaaaaaaaaaaaaaaaaaaaaaaa",
+ Utils.normalizePassphrase(" a-aaaaa-aaaaa-aaaaa-aaaaa-aaaaa ")
+ );
+ Assert.ok(Utils.isPassphrase("aaaaaaaaaaaaaaaaaaaaaaaaaa "));
+ Assert.ok(Utils.isPassphrase(" aaaaaaaaaaaaaaaaaaaaaaaaaa"));
+ Assert.ok(Utils.isPassphrase(" aaaaaaaaaaaaaaaaaaaaaaaaaa "));
+ Assert.ok(Utils.isPassphrase(" a-aaaaa-aaaaa-aaaaa-aaaaa-aaaaa "));
+ Assert.ok(!Utils.isPassphrase(" -aaaaa-aaaaa-aaaaa-aaaaa-aaaaa "));
+
+ _("Normalizing 20-char passphrases.");
+ Assert.equal(
+ Utils.normalizePassphrase("abcde-abcde-abcde-abcde"),
+ "abcdeabcdeabcdeabcde"
+ );
+ Assert.equal(
+ Utils.normalizePassphrase("a-bcde-abcde-abcde-abcde"),
+ "a-bcde-abcde-abcde-abcde"
+ );
+ Assert.equal(
+ Utils.normalizePassphrase(" abcde-abcde-abcde-abcde "),
+ "abcdeabcdeabcdeabcde"
+ );
+}
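+
+// Observed normalization rules: surrounding whitespace is always stripped;
+// hyphens are removed for 26-character passphrases, and for 20-character
+// ones only when they fall in the regular 5-character grouping (hence
+// "a-bcde-abcde-abcde-abcde" is returned unchanged).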
diff --git a/services/sync/tests/unit/xpcshell.toml b/services/sync/tests/unit/xpcshell.toml
new file mode 100644
index 0000000000..e958c8a738
--- /dev/null
+++ b/services/sync/tests/unit/xpcshell.toml
@@ -0,0 +1,304 @@
+[DEFAULT]
+head = "head_appinfo.js ../../../common/tests/unit/head_helpers.js head_helpers.js head_http_server.js head_errorhandler_common.js"
+firefox-appdir = "browser"
+prefs = ["identity.fxaccounts.enabled=true"]
+support-files = [
+ "addon1-search.json",
+ "bootstrap1-search.json",
+ "missing-sourceuri.json",
+ "missing-xpi-search.json",
+ "rewrite-search.json",
+ "sync_ping_schema.json",
+ "systemaddon-search.json",
+ "!/services/common/tests/unit/head_helpers.js",
+ "!/toolkit/components/extensions/test/xpcshell/head_sync.js",
+]
+
+# The manifest is roughly ordered from low-level to high-level. When making
+# systemic sweeping changes, this makes it easier to identify errors closer to
+# the source.
+
+# Ensure we can import everything.
+
+["test_412.js"]
+
+["test_addon_utils.js"]
+run-sequentially = "Restarts server, can't change pref."
+tags = "addons"
+
+["test_addons_engine.js"]
+run-sequentially = "Frequent timeouts, bug 1395148"
+tags = "addons"
+
+["test_addons_reconciler.js"]
+skip-if = ["appname == 'thunderbird'"]
+tags = "addons"
+
+["test_addons_store.js"]
+run-sequentially = "Frequent timeouts, bug 1395148"
+tags = "addons"
+
+["test_addons_tracker.js"]
+tags = "addons"
+
+["test_addons_validator.js"]
+tags = "addons"
+
+["test_bookmark_batch_fail.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_bookmark_decline_undecline.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_bookmark_engine.js"]
+skip-if = [
+ "appname == 'thunderbird'",
+ "tsan", # Runs unreasonably slow on TSan, bug 1612707
+]
+
+["test_bookmark_order.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_bookmark_places_query_rewriting.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_bookmark_record.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_bookmark_store.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_bookmark_tracker.js"]
+skip-if = [
+ "appname == 'thunderbird'",
+ "tsan", # Runs unreasonably slow on TSan, bug 1612707
+]
+requesttimeoutfactor = 4
+
+["test_bridged_engine.js"]
+
+["test_clients_engine.js"]
+run-sequentially = "Frequent timeouts, bug 1395148"
+
+["test_clients_escape.js"]
+
+["test_collection_getBatched.js"]
+
+["test_collections_recovery.js"]
+
+["test_corrupt_keys.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_declined.js"]
+
+["test_disconnect_shutdown.js"]
+
+["test_engine.js"]
+
+["test_engine_abort.js"]
+
+["test_engine_changes_during_sync.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_enginemanager.js"]
+
+["test_errorhandler_1.js"]
+run-sequentially = "Frequent timeouts, bug 1395148"
+
+["test_errorhandler_2.js"]
+run-sequentially = "Frequent timeouts, bug 1395148"
+
+["test_errorhandler_filelog.js"]
+
+["test_errorhandler_sync_checkServerError.js"]
+
+["test_extension_storage_engine.js"]
+skip-if = ["appname == 'thunderbird'"]
+run-sequentially = "extension-storage migration happens only once, and must be tested first."
+
+["test_extension_storage_engine_kinto.js"]
+skip-if = ["appname == 'thunderbird'"]
+run-sequentially = "extension-storage migration happens only once, and must be tested first."
+
+["test_extension_storage_migration_telem.js"]
+skip-if = ["appname == 'thunderbird'"]
+run-sequentially = "extension-storage migration happens only once, and must be tested first."
+
+["test_extension_storage_tracker_kinto.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_form_validator.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_forms_store.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_forms_tracker.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_fxa_node_reassignment.js"]
+run-sequentially = "Frequent timeouts, bug 1395148"
+
+["test_fxa_service_cluster.js"]
+# Finally, we test each engine.
+
+["test_history_engine.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_history_store.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_history_tracker.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_hmac_error.js"]
+
+["test_httpd_sync_server.js"]
+# HTTP layers.
+
+["test_interval_triggers.js"]
+
+["test_keys.js"]
+
+["test_load_modules.js"]
+# util contains a bunch of functionality used throughout.
+
+["test_node_reassignment.js"]
+run-sequentially = "Frequent timeouts, bug 1395148"
+
+["test_password_engine.js"]
+
+["test_password_store.js"]
+
+["test_password_tracker.js"]
+
+["test_password_validator.js"]
+
+["test_postqueue.js"]
+# Synced tabs.
+
+["test_prefs_engine.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_prefs_store.js"]
+skip-if = ["appname == 'thunderbird'"]
+support-files = ["prefs_test_prefs_store.js"]
+
+["test_prefs_tracker.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_records_crypto.js"]
+
+["test_records_wbo.js"]
+
+["test_resource.js"]
+
+["test_resource_header.js"]
+
+["test_resource_ua.js"]
+# Generic Sync types.
+
+["test_score_triggers.js"]
+
+["test_service_attributes.js"]
+# Bug 752243: Profile cleanup frequently fails
+skip-if = [
+ "os == 'mac'",
+ "os == 'linux'",
+]
+
+["test_service_cluster.js"]
+
+["test_service_detect_upgrade.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_service_login.js"]
+
+["test_service_startOver.js"]
+
+["test_service_startup.js"]
+
+["test_service_sync_401.js"]
+
+["test_service_sync_locked.js"]
+
+["test_service_sync_remoteSetup.js"]
+run-sequentially = "Frequent timeouts, bug 1395148"
+
+["test_service_sync_specified.js"]
+
+["test_service_sync_updateEnabledEngines.js"]
+run-sequentially = "Frequent timeouts, bug 1395148"
+
+["test_service_verifyLogin.js"]
+
+["test_service_wipeClient.js"]
+
+["test_service_wipeServer.js"]
+# Bug 752243: Profile cleanup frequently fails
+skip-if = [
+ "os == 'mac'",
+ "os == 'linux'",
+]
+
+["test_status.js"]
+
+["test_status_checkSetup.js"]
+
+["test_sync_auth_manager.js"]
+# Engine APIs.
+
+["test_syncedtabs.js"]
+
+["test_syncengine.js"]
+
+["test_syncengine_sync.js"]
+run-sequentially = "Frequent timeouts, bug 1395148"
+
+["test_syncscheduler.js"]
+run-sequentially = "Frequent timeouts, bug 1395148"
+# Firefox Accounts specific tests
+
+["test_tab_engine.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_tab_provider.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_tab_quickwrite.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_tab_tracker.js"]
+skip-if = ["appname == 'thunderbird'"]
+
+["test_telemetry.js"]
+skip-if = [
+ "appname == 'thunderbird'",
+ "tsan", # Unreasonably slow, bug 1612707
+]
+requesttimeoutfactor = 4
+
+["test_tracker_addChanged.js"]
+# Service semantics.
+
+["test_uistate.js"]
+
+["test_utils_catch.js"]
+
+["test_utils_deepEquals.js"]
+
+["test_utils_deferGetSet.js"]
+
+["test_utils_json.js"]
+
+["test_utils_keyEncoding.js"]
+
+["test_utils_lock.js"]
+
+["test_utils_makeGUID.js"]
+run-sequentially = "Disproportionately slows down full test run, bug 1450316"
+
+["test_utils_notify.js"]
+
+["test_utils_passphrase.js"]
+# We have a number of other libraries that are pretty much standalone.
diff --git a/services/sync/tps/extensions/tps/api.js b/services/sync/tps/extensions/tps/api.js
new file mode 100644
index 0000000000..0843376630
--- /dev/null
+++ b/services/sync/tps/extensions/tps/api.js
@@ -0,0 +1,77 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const { FileUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/FileUtils.sys.mjs"
+);
+
+/* globals ExtensionAPI, Services, XPCOMUtils */
+
+XPCOMUtils.defineLazyServiceGetter(
+ this,
+ "resProto",
+ "@mozilla.org/network/protocol;1?name=resource",
+ "nsISubstitutingProtocolHandler"
+);
+
+async function tpsStartup() {
+ try {
+ var { TPS } = ChromeUtils.importESModule("resource://tps/tps.sys.mjs");
+ let { goQuitApplication } = ChromeUtils.importESModule(
+ "resource://tps/quit.sys.mjs"
+ );
+ TPS.goQuitApplication = goQuitApplication;
+
+ let testFile = Services.prefs.getStringPref("testing.tps.testFile", "");
+ let testPhase = Services.prefs.getStringPref("testing.tps.testPhase", "");
+ if (!testFile || !testPhase) {
+ // Note: this quits.
+ TPS.DumpError(
+ "TPS no longer takes arguments from the command line. " +
+ "instead you need to pass preferences `testing.tps.{testFile,testPhase}` " +
+ "and optionally `testing.tps.{logFile,ignoreUnusedEngines}`.\n"
+ );
+ }
+
+ let logFile = Services.prefs.getStringPref("testing.tps.logFile", "");
+ let ignoreUnusedEngines = Services.prefs.getBoolPref(
+ "testing.tps.ignoreUnusedEngines",
+ false
+ );
+ let options = { ignoreUnusedEngines };
+ let testFileUri = Services.io.newFileURI(new FileUtils.File(testFile)).spec;
+
+ try {
+ await TPS.RunTestPhase(testFileUri, testPhase, logFile, options);
+ } catch (err) {
+ TPS.DumpError("TestPhase failed", err);
+ }
+ } catch (e) {
+ if (typeof TPS != "undefined") {
+ // Note: This calls quit() under the hood
+ TPS.DumpError("Test initialization failed", e);
+ }
+ dump(`TPS test initialization failed: ${e} - ${e.stack}\n`);
+      // Try to quit right away; no reason to wait around for python
+ // to kill us if initialization failed.
+ Services.startup.quit(Ci.nsIAppStartup.eForceQuit);
+ }
+}
+
+this.tps = class extends ExtensionAPI {
+ onStartup() {
+ resProto.setSubstitution(
+ "tps",
+ Services.io.newURI("resource/", null, this.extension.rootURI)
+ );
+ /* Ignore the platform's online/offline status while running tests. */
+ Services.io.manageOfflineStatus = false;
+ Services.io.offline = false;
+ tpsStartup();
+ }
+
+ onShutdown() {
+ resProto.setSubstitution("tps", null);
+ }
+};
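+
+// TPS is normally driven by the external harness setting these prefs before
+// launching the browser (illustrative values, not taken from this file):
+//   testing.tps.testFile  = "/path/to/test_bookmarks.js"
+//   testing.tps.testPhase = "phase1"
+//   testing.tps.logFile   = "/tmp/tps.log"      (optional)
+//   testing.tps.ignoreUnusedEngines = true      (optional)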
diff --git a/services/sync/tps/extensions/tps/manifest.json b/services/sync/tps/extensions/tps/manifest.json
new file mode 100644
index 0000000000..c961e76506
--- /dev/null
+++ b/services/sync/tps/extensions/tps/manifest.json
@@ -0,0 +1,23 @@
+{
+ "manifest_version": 2,
+ "name": "TPS",
+ "version": "1.0",
+
+ "browser_specific_settings": {
+ "gecko": {
+ "id": "tps@mozilla.org"
+ }
+ },
+
+ "experiment_apis": {
+ "tps": {
+ "schema": "schema.json",
+ "parent": {
+ "scopes": ["addon_parent"],
+ "script": "api.js",
+ "paths": [["tps"]],
+ "events": ["startup"]
+ }
+ }
+ }
+}
diff --git a/services/sync/tps/extensions/tps/resource/auth/fxaccounts.sys.mjs b/services/sync/tps/extensions/tps/resource/auth/fxaccounts.sys.mjs
new file mode 100644
index 0000000000..81c0fd578a
--- /dev/null
+++ b/services/sync/tps/extensions/tps/resource/auth/fxaccounts.sys.mjs
@@ -0,0 +1,209 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+import { clearTimeout, setTimeout } from "resource://gre/modules/Timer.sys.mjs";
+
+import { getFxAccountsSingleton } from "resource://gre/modules/FxAccounts.sys.mjs";
+
+const fxAccounts = getFxAccountsSingleton();
+import { FxAccountsClient } from "resource://gre/modules/FxAccountsClient.sys.mjs";
+import { FxAccountsConfig } from "resource://gre/modules/FxAccountsConfig.sys.mjs";
+import { Logger } from "resource://tps/logger.sys.mjs";
+
+/**
+ * Helper object for Firefox Accounts authentication
+ */
+export var Authentication = {
+ /**
+   * Check if a user is logged in
+ */
+ async isLoggedIn() {
+ return !!(await this.getSignedInUser());
+ },
+
+ async isReady() {
+ let user = await this.getSignedInUser();
+ return user && user.verified;
+ },
+
+ _getRestmailUsername(user) {
+ const restmailSuffix = "@restmail.net";
+ if (user.toLowerCase().endsWith(restmailSuffix)) {
+ return user.slice(0, -restmailSuffix.length);
+ }
+ return null;
+ },
+
+ async shortWaitForVerification(ms) {
+ let userData = await this.getSignedInUser();
+ let timeoutID;
+ let timeoutPromise = new Promise(resolve => {
+ timeoutID = setTimeout(() => {
+ Logger.logInfo(`Warning: no verification after ${ms}ms.`);
+ resolve();
+ }, ms);
+ });
+ await Promise.race([
+ fxAccounts.whenVerified(userData).finally(() => clearTimeout(timeoutID)),
+ timeoutPromise,
+ ]);
+ userData = await this.getSignedInUser();
+ return userData && userData.verified;
+ },
+
+ async _openVerificationPage(uri) {
+ let mainWindow = Services.wm.getMostRecentWindow("navigator:browser");
+ let newtab = mainWindow.gBrowser.addWebTab(uri);
+ let win = mainWindow.gBrowser.getBrowserForTab(newtab);
+ await new Promise(resolve => {
+ win.addEventListener("loadend", resolve, { once: true });
+ });
+ let didVerify = await this.shortWaitForVerification(10000);
+ mainWindow.gBrowser.removeTab(newtab);
+ return didVerify;
+ },
+
+ async _completeVerification(user) {
+ let username = this._getRestmailUsername(user);
+ if (!username) {
+ Logger.logInfo(
+ `Username "${user}" isn't a restmail username so can't complete verification`
+ );
+ return false;
+ }
+ Logger.logInfo("Fetching mail (from restmail) for user " + username);
+ let restmailURI = `https://www.restmail.net/mail/${encodeURIComponent(
+ username
+ )}`;
+ let triedAlready = new Set();
+ const tries = 10;
+ const normalWait = 2000;
+ for (let i = 0; i < tries; ++i) {
+ let resp = await fetch(restmailURI);
+ let messages = await resp.json();
+ // Sort so that the most recent emails are first.
+ messages.sort((a, b) => new Date(b.receivedAt) - new Date(a.receivedAt));
+ for (let m of messages) {
+        // We look for a message with an x-link header that we haven't yet tried.
+ if (!m.headers["x-link"] || triedAlready.has(m.headers["x-link"])) {
+ continue;
+ }
+ let confirmLink = m.headers["x-link"];
+ triedAlready.add(confirmLink);
+ Logger.logInfo("Trying confirmation link " + confirmLink);
+ try {
+ if (await this._openVerificationPage(confirmLink)) {
+ return true;
+ }
+ } catch (e) {
+ Logger.logInfo(
+ "Warning: Failed to follow confirmation link: " +
+ Log.exceptionStr(e)
+ );
+ }
+ }
+ if (i === 0) {
+        // After the first failed pass, ask the server to resend the verification email.
+ await fxAccounts.resendVerificationEmail();
+ }
+ if (await this.shortWaitForVerification(normalWait)) {
+ return true;
+ }
+ }
+ // One last try.
+ return this.shortWaitForVerification(normalWait);
+ },
+
+ async deleteEmail(user) {
+ let username = this._getRestmailUsername(user);
+ if (!username) {
+ Logger.logInfo("Not a restmail username, can't delete");
+ return false;
+ }
+ Logger.logInfo("Deleting mail (from restmail) for user " + username);
+ let restmailURI = `https://www.restmail.net/mail/${encodeURIComponent(
+ username
+ )}`;
+ try {
+ // Clean up after ourselves.
+ let deleteResult = await fetch(restmailURI, { method: "DELETE" });
+ if (!deleteResult.ok) {
+ Logger.logInfo(
+ `Warning: Got non-success status ${deleteResult.status} when deleting emails`
+ );
+ return false;
+ }
+ } catch (e) {
+ Logger.logInfo(
+ "Warning: Failed to delete old emails: " + Log.exceptionStr(e)
+ );
+ return false;
+ }
+ return true;
+ },
+
+ /**
+ * Wrapper to retrieve the currently signed in user
+ *
+ * @returns Information about the currently signed in user
+ */
+ async getSignedInUser() {
+ try {
+ return await fxAccounts.getSignedInUser();
+ } catch (error) {
+ Logger.logError(
+ "getSignedInUser() failed with: " + JSON.stringify(error)
+ );
+ throw error;
+ }
+ },
+
+ /**
+ * Wrapper to synchronize the login of a user
+ *
+ * @param account
+ * Account information of the user to login
+ * @param account.username
+ * The username for the account (utf8)
+ * @param account.password
+ * The user's password
+ */
+ async signIn(account) {
+ Logger.AssertTrue(account.username, "Username has been found");
+ Logger.AssertTrue(account.password, "Password has been found");
+
+ Logger.logInfo("Login user: " + account.username);
+
+ try {
+ // Required here since we don't go through the real login page
+ await FxAccountsConfig.ensureConfigured();
+
+ let client = new FxAccountsClient();
+ let credentials = await client.signIn(
+ account.username,
+ account.password,
+ true
+ );
+ await fxAccounts._internal.setSignedInUser(credentials);
+ if (!credentials.verified) {
+ await this._completeVerification(account.username);
+ }
+
+ return true;
+ } catch (error) {
+ throw new Error("signIn() failed with: " + error.message);
+ }
+ },
+
+ /**
+ * Sign out of Firefox Accounts.
+ */
+ async signOut() {
+ if (await Authentication.isLoggedIn()) {
+ // Note: This will clean up the device ID.
+ await fxAccounts.signOut();
+ }
+ },
+};
diff --git a/services/sync/tps/extensions/tps/resource/logger.sys.mjs b/services/sync/tps/extensions/tps/resource/logger.sys.mjs
new file mode 100644
index 0000000000..a1995f88b6
--- /dev/null
+++ b/services/sync/tps/extensions/tps/resource/logger.sys.mjs
@@ -0,0 +1,168 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This is an ES module to be imported via
+   ChromeUtils.importESModule() and acts as a singleton.
+   Only the following listed symbols will be exposed on import, and only when
+   and where imported. */
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ ObjectUtils: "resource://gre/modules/ObjectUtils.sys.mjs",
+});
+
+export var Logger = {
+ _foStream: null,
+ _converter: null,
+ _potentialError: null,
+
+ init(path) {
+ if (this._converter != null) {
+ // we're already open!
+ return;
+ }
+
+ if (path) {
+ Services.prefs.setStringPref("tps.logfile", path);
+ } else {
+ path = Services.prefs.getStringPref("tps.logfile");
+ }
+
+ this._file = Cc["@mozilla.org/file/local;1"].createInstance(Ci.nsIFile);
+ this._file.initWithPath(path);
+ var exists = this._file.exists();
+
+ // Make a file output stream and converter to handle it.
+ this._foStream = Cc[
+ "@mozilla.org/network/file-output-stream;1"
+ ].createInstance(Ci.nsIFileOutputStream);
+    // If the file already exists, append to it, otherwise create it.
+    // NSPR file flags: 0x02 = PR_WRONLY, 0x08 = PR_CREATE_FILE,
+    // 0x10 = PR_APPEND, 0x20 = PR_TRUNCATE.
+    var fileflags = exists ? 0x02 | 0x08 | 0x10 : 0x02 | 0x08 | 0x20;
+
+ this._foStream.init(this._file, fileflags, 0o666, 0);
+ this._converter = Cc[
+ "@mozilla.org/intl/converter-output-stream;1"
+ ].createInstance(Ci.nsIConverterOutputStream);
+ this._converter.init(this._foStream, "UTF-8");
+ },
+
+ write(data) {
+ if (this._converter == null) {
+ console.error("TPS Logger.write called with _converter == null!");
+ return;
+ }
+ this._converter.writeString(data);
+ },
+
+ close() {
+ if (this._converter != null) {
+ this._converter.close();
+ this._converter = null;
+ this._foStream = null;
+ }
+ },
+
+ AssertTrue(bool, msg, showPotentialError) {
+ if (bool) {
+ return;
+ }
+
+ if (showPotentialError && this._potentialError) {
+ msg += "; " + this._potentialError;
+ this._potentialError = null;
+ }
+ throw new Error("ASSERTION FAILED! " + msg);
+ },
+
+ AssertFalse(bool, msg, showPotentialError) {
+ return this.AssertTrue(!bool, msg, showPotentialError);
+ },
+
+ AssertEqual(got, expected, msg) {
+ if (!lazy.ObjectUtils.deepEqual(got, expected)) {
+ throw new Error(
+ "ASSERTION FAILED! " +
+ msg +
+ "; expected " +
+ JSON.stringify(expected) +
+ ", got " +
+ JSON.stringify(got)
+ );
+ }
+ },
+
+ log(msg, withoutPrefix) {
+ dump(msg + "\n");
+ if (withoutPrefix) {
+ this.write(msg + "\n");
+ } else {
+ function pad(n, len) {
+ let s = "0000" + n;
+ return s.slice(-len);
+ }
+
+ let now = new Date();
+ let year = pad(now.getFullYear(), 4);
+ let month = pad(now.getMonth() + 1, 2);
+ let day = pad(now.getDate(), 2);
+ let hour = pad(now.getHours(), 2);
+ let minutes = pad(now.getMinutes(), 2);
+ let seconds = pad(now.getSeconds(), 2);
+ let ms = pad(now.getMilliseconds(), 3);
+
+ this.write(
+ year +
+ "-" +
+ month +
+ "-" +
+ day +
+ " " +
+ hour +
+ ":" +
+ minutes +
+ ":" +
+ seconds +
+ "." +
+ ms +
+ " " +
+ msg +
+ "\n"
+ );
+ }
+ },
+
+ clearPotentialError() {
+ this._potentialError = null;
+ },
+
+ logPotentialError(msg) {
+ this._potentialError = msg;
+ },
+
+ logLastPotentialError(msg) {
+ var message = msg;
+ if (this._potentialError) {
+      message = this._potentialError;
+ this._potentialError = null;
+ }
+ this.log("CROSSWEAVE ERROR: " + message);
+ },
+
+ logError(msg) {
+ this.log("CROSSWEAVE ERROR: " + msg);
+ },
+
+ logInfo(msg, withoutPrefix) {
+ if (withoutPrefix) {
+ this.log(msg, true);
+ } else {
+ this.log("CROSSWEAVE INFO: " + msg);
+ }
+ },
+
+ logPass(msg) {
+ this.log("CROSSWEAVE TEST PASS: " + msg);
+ },
+};
diff --git a/services/sync/tps/extensions/tps/resource/modules/addons.sys.mjs b/services/sync/tps/extensions/tps/resource/modules/addons.sys.mjs
new file mode 100644
index 0000000000..596f942a06
--- /dev/null
+++ b/services/sync/tps/extensions/tps/resource/modules/addons.sys.mjs
@@ -0,0 +1,93 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { AddonManager } from "resource://gre/modules/AddonManager.sys.mjs";
+import { AddonUtils } from "resource://services-sync/addonutils.sys.mjs";
+import { Logger } from "resource://tps/logger.sys.mjs";
+
+export const STATE_ENABLED = 1;
+export const STATE_DISABLED = 2;
+
+export function Addon(TPS, id) {
+ this.TPS = TPS;
+ this.id = id;
+}
+
+Addon.prototype = {
+ addon: null,
+
+ async uninstall() {
+ // find our addon locally
+ let addon = await AddonManager.getAddonByID(this.id);
+ Logger.AssertTrue(
+ !!addon,
+ "could not find addon " + this.id + " to uninstall"
+ );
+ await AddonUtils.uninstallAddon(addon);
+ },
+
+ async find(state) {
+ let addon = await AddonManager.getAddonByID(this.id);
+
+ if (!addon) {
+ Logger.logInfo("Could not find add-on with ID: " + this.id);
+ return false;
+ }
+
+ this.addon = addon;
+
+ Logger.logInfo(
+ "add-on found: " + addon.id + ", enabled: " + !addon.userDisabled
+ );
+ if (state == STATE_ENABLED) {
+ Logger.AssertFalse(addon.userDisabled, "add-on is disabled: " + addon.id);
+ return true;
+ } else if (state == STATE_DISABLED) {
+ Logger.AssertTrue(addon.userDisabled, "add-on is enabled: " + addon.id);
+ return true;
+ } else if (state) {
+ throw new Error("Don't know how to handle state: " + state);
+ } else {
+ // No state, so just checking that it exists.
+ return true;
+ }
+ },
+
+ async install() {
+ // For Install, the id parameter initially passed is really the filename
+ // for the addon's install .xml; we'll read the actual id from the .xml.
+
+ const result = await AddonUtils.installAddons([
+ { id: this.id, requireSecureURI: false },
+ ]);
+
+ Logger.AssertEqual(
+ 1,
+ result.installedIDs.length,
+ "Exactly 1 add-on was installed."
+ );
+ Logger.AssertEqual(
+ this.id,
+ result.installedIDs[0],
+ "Add-on was installed successfully: " + this.id
+ );
+ },
+
+ async setEnabled(flag) {
+ Logger.AssertTrue(await this.find(), "Add-on is available.");
+
+ let userDisabled;
+ if (flag == STATE_ENABLED) {
+ userDisabled = false;
+ } else if (flag == STATE_DISABLED) {
+ userDisabled = true;
+ } else {
+ throw new Error("Unknown flag to setEnabled: " + flag);
+ }
+
+ await AddonUtils.updateUserDisabled(this.addon, userDisabled);
+
+ return true;
+ },
+};
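+
+// A minimal usage sketch (the add-on id is one of the TPS test fixtures, and
+// the first argument is the running TPS object):
+//   let addon = new Addon(TPS, "test-webext@quality.mozilla.org");
+//   await addon.install();
+//   await addon.find(STATE_ENABLED); // asserts it exists and is enabled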
diff --git a/services/sync/tps/extensions/tps/resource/modules/bookmarkValidator.sys.mjs b/services/sync/tps/extensions/tps/resource/modules/bookmarkValidator.sys.mjs
new file mode 100644
index 0000000000..a7724c6aaa
--- /dev/null
+++ b/services/sync/tps/extensions/tps/resource/modules/bookmarkValidator.sys.mjs
@@ -0,0 +1,1063 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This file was moved to tps from the main production code as it was unused
+// after removal of the non-mirror bookmarks engine.
+// It used to have a test before it was moved:
+// https://searchfox.org/mozilla-central/rev/b1a5802e0f73bfd6d2096e5fefc2b47831a50b2d/services/sync/tests/unit/test_bookmark_validator.js
+
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+import { Utils } from "resource://services-sync/util.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ Async: "resource://services-common/async.sys.mjs",
+ PlacesSyncUtils: "resource://gre/modules/PlacesSyncUtils.sys.mjs",
+ PlacesUtils: "resource://gre/modules/PlacesUtils.sys.mjs",
+});
+
+const QUERY_PROTOCOL = "place:";
+
+function areURLsEqual(a, b) {
+ if (a === b) {
+ return true;
+ }
+ if (a.startsWith(QUERY_PROTOCOL) != b.startsWith(QUERY_PROTOCOL)) {
+ return false;
+ }
+ // Tag queries are special because we rewrite them to point to the
+ // local tag folder ID. It's expected that the folders won't match,
+ // but all other params should.
+ let aParams = new URLSearchParams(a.slice(QUERY_PROTOCOL.length));
+ let aType = +aParams.get("type");
+ if (aType != Ci.nsINavHistoryQueryOptions.RESULTS_AS_TAG_CONTENTS) {
+ return false;
+ }
+ let bParams = new URLSearchParams(b.slice(QUERY_PROTOCOL.length));
+ let bType = +bParams.get("type");
+ if (bType != Ci.nsINavHistoryQueryOptions.RESULTS_AS_TAG_CONTENTS) {
+ return false;
+ }
+ let aKeys = new Set(aParams.keys());
+ let bKeys = new Set(bParams.keys());
+ if (aKeys.size != bKeys.size) {
+ return false;
+ }
+ // Tag queries shouldn't reference multiple folders, or named folders like
+ // "TOOLBAR" or "BOOKMARKS_MENU". Just in case, we make sure all folder IDs
+ // are numeric. If they are, we ignore them when comparing the query params.
+ if (aKeys.has("folder") && aParams.getAll("folder").every(isFinite)) {
+ aKeys.delete("folder");
+ }
+ if (bKeys.has("folder") && bParams.getAll("folder").every(isFinite)) {
+ bKeys.delete("folder");
+ }
+ for (let key of aKeys) {
+ if (!bKeys.has(key)) {
+ return false;
+ }
+ if (
+ !CommonUtils.arrayEqual(
+ aParams.getAll(key).sort(),
+ bParams.getAll(key).sort()
+ )
+ ) {
+ return false;
+ }
+ }
+ for (let key of bKeys) {
+ if (!aKeys.has(key)) {
+ return false;
+ }
+ }
+ return true;
+}
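+
+// For illustration (assuming nsINavHistoryQueryOptions.RESULTS_AS_TAG_CONTENTS
+// is 7): these two tag queries compare equal, because numeric `folder` params
+// are ignored for tag queries:
+//   areURLsEqual("place:type=7&folder=5", "place:type=7&folder=9"); // true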
+
+const BOOKMARK_VALIDATOR_VERSION = 1;
+
+/**
+ * Result of bookmark validation. Contains the following fields which describe
+ * server-side problems unless otherwise specified.
+ *
+ * - missingIDs (number): # of objects with missing ids
+ * - duplicates (array of ids): ids seen more than once
+ * - parentChildMismatches (array of {parent: parentid, child: childid}):
+ * instances where the child's parentid and the parent's children array
+ * do not match
+ * - cycles (array of array of ids). List of cycles found in the server-side tree.
+ * - clientCycles (array of array of ids). List of cycles found in the client-side tree.
+ * - orphans (array of {id: string, parent: string}): List of nodes with
+ * either no parentid, or where the parent could not be found.
+ * - missingChildren (array of {parent: id, child: id}):
+ * List of parent/children where the child id couldn't be found
+ * - deletedChildren (array of { parent: id, child: id }):
+ * List of parent/children where child id was a deleted item (but still showed up
+ * in the children array)
+ * - multipleParents (array of {child: id, parents: array of ids}):
+ * List of children that were part of multiple parent arrays
+ * - deletedParents (array of ids) : List of records that aren't deleted but
+ * had deleted parents
+ * - childrenOnNonFolder (array of ids): list of non-folders that still have
+ * children arrays
+ * - duplicateChildren (array of ids): list of records who have the same
+ * child listed multiple times in their children array
+ * - parentNotFolder (array of ids): list of records that have parents that
+ * aren't folders
+ * - rootOnServer (boolean): true if the root came from the server
+ * - badClientRoots (array of ids): Contains any client-side root ids where
+ * the root is missing or isn't a (direct) child of the places root.
+ *
+ * - clientMissing: Array of ids on the server missing from the client
+ * - serverMissing: Array of ids on the client missing from the server
+ * - serverDeleted: Array of ids on the client that the server had marked as deleted.
+ * - serverUnexpected: Array of ids that appear on the server but shouldn't
+ * because the client attempts to never upload them.
+ * - differences: Array of {id: string, differences: string array} recording
+ * the non-structural properties that differ between the client and server
+ * - structuralDifferences: As above, but contains the items where the differences were
+ * structural, that is, they contained childGUIDs or parentid
+ */
+export class BookmarkProblemData {
+ constructor() {
+ this.rootOnServer = false;
+ this.missingIDs = 0;
+
+ this.duplicates = [];
+ this.parentChildMismatches = [];
+ this.cycles = [];
+ this.clientCycles = [];
+ this.orphans = [];
+ this.missingChildren = [];
+ this.deletedChildren = [];
+ this.multipleParents = [];
+ this.deletedParents = [];
+ this.childrenOnNonFolder = [];
+ this.duplicateChildren = [];
+ this.parentNotFolder = [];
+
+ this.badClientRoots = [];
+ this.clientMissing = [];
+ this.serverMissing = [];
+ this.serverDeleted = [];
+ this.serverUnexpected = [];
+ this.differences = [];
+ this.structuralDifferences = [];
+ }
+
+ /**
+ * Convert ("difference", [{ differences: ["tags", "name"] }, { differences: ["name"] }]) into
+ * [{ name: "difference:tags", count: 1}, { name: "difference:name", count: 2 }], etc.
+ */
+ _summarizeDifferences(prefix, diffs) {
+ let diffCounts = new Map();
+ for (let { differences } of diffs) {
+ for (let type of differences) {
+ let name = prefix + ":" + type;
+ let count = diffCounts.get(name) || 0;
+ diffCounts.set(name, count + 1);
+ }
+ }
+ return [...diffCounts].map(([name, count]) => ({ name, count }));
+ }
+
+ /**
+ * Produce a list summarizing problems found. Each entry contains {name, count},
+ * where name is the field name for the problem, and count is the number of times
+ * the problem was encountered.
+ *
+ * Validation has failed if any count is not 0.
+ *
+ * If the `full` argument is truthy, we also include information about which
+ * properties we saw structural differences in. Currently, this means
+ * "sdiff:parentid" and/or "sdiff:childGUIDs" may be present.
+ */
+ getSummary(full) {
+ let result = [
+ { name: "clientMissing", count: this.clientMissing.length },
+ { name: "serverMissing", count: this.serverMissing.length },
+ { name: "serverDeleted", count: this.serverDeleted.length },
+ { name: "serverUnexpected", count: this.serverUnexpected.length },
+
+ {
+ name: "structuralDifferences",
+ count: this.structuralDifferences.length,
+ },
+ { name: "differences", count: this.differences.length },
+
+ { name: "missingIDs", count: this.missingIDs },
+ { name: "rootOnServer", count: this.rootOnServer ? 1 : 0 },
+
+ { name: "duplicates", count: this.duplicates.length },
+ {
+ name: "parentChildMismatches",
+ count: this.parentChildMismatches.length,
+ },
+ { name: "cycles", count: this.cycles.length },
+ { name: "clientCycles", count: this.clientCycles.length },
+ { name: "badClientRoots", count: this.badClientRoots.length },
+ { name: "orphans", count: this.orphans.length },
+ { name: "missingChildren", count: this.missingChildren.length },
+ { name: "deletedChildren", count: this.deletedChildren.length },
+ { name: "multipleParents", count: this.multipleParents.length },
+ { name: "deletedParents", count: this.deletedParents.length },
+ { name: "childrenOnNonFolder", count: this.childrenOnNonFolder.length },
+ { name: "duplicateChildren", count: this.duplicateChildren.length },
+ { name: "parentNotFolder", count: this.parentNotFolder.length },
+ ];
+ if (full) {
+ let structural = this._summarizeDifferences(
+ "sdiff",
+ this.structuralDifferences
+ );
+ result.push.apply(result, structural);
+ }
+ return result;
+ }
+}
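+
+// A minimal usage sketch (illustrative ids): each summary entry pairs a
+// problem field with its count, so a single recorded orphan shows up as:
+//   let problems = new BookmarkProblemData();
+//   problems.orphans.push({ id: "AAAAAAAAAAAA", parent: "BBBBBBBBBBBB" });
+//   problems.getSummary().find(p => p.name == "orphans").count; // 1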
+
+// Defined lazily to avoid initializing PlacesUtils.bookmarks too soon.
+ChromeUtils.defineLazyGetter(lazy, "SYNCED_ROOTS", () => [
+ lazy.PlacesUtils.bookmarks.menuGuid,
+ lazy.PlacesUtils.bookmarks.toolbarGuid,
+ lazy.PlacesUtils.bookmarks.unfiledGuid,
+ lazy.PlacesUtils.bookmarks.mobileGuid,
+]);
+
+// Maps root GUIDs to their query folder names from
+// toolkit/components/places/nsNavHistoryQuery.cpp. We follow queries that
+// reference existing folders in the client tree, and detect cycles where a
+// query references its containing folder.
+ChromeUtils.defineLazyGetter(lazy, "ROOT_GUID_TO_QUERY_FOLDER_NAME", () => ({
+ [lazy.PlacesUtils.bookmarks.rootGuid]: "PLACES_ROOT",
+ [lazy.PlacesUtils.bookmarks.menuGuid]: "BOOKMARKS_MENU",
+
+ // Tags should never show up in our client tree, and never form cycles, but we
+ // report them just in case.
+ [lazy.PlacesUtils.bookmarks.tagsGuid]: "TAGS",
+
+ [lazy.PlacesUtils.bookmarks.unfiledGuid]: "UNFILED_BOOKMARKS",
+ [lazy.PlacesUtils.bookmarks.toolbarGuid]: "TOOLBAR",
+ [lazy.PlacesUtils.bookmarks.mobileGuid]: "MOBILE_BOOKMARKS",
+}));
+
+async function detectCycles(records) {
+ // currentPath and pathLookup contain the same data. pathLookup is faster to
+ // query, but currentPath gives us the order of traversal that we need in
+ // order to report the members of the cycles.
+ let pathLookup = new Set();
+ let currentPath = [];
+ let cycles = [];
+ let seenEver = new Set();
+ const yieldState = lazy.Async.yieldState();
+
+ const traverse = async node => {
+ if (pathLookup.has(node)) {
+ let cycleStart = currentPath.lastIndexOf(node);
+ let cyclePath = currentPath.slice(cycleStart).map(n => n.id);
+ cycles.push(cyclePath);
+ return;
+ } else if (seenEver.has(node)) {
+ // If we're checking the server, this is a problem, but it should already be reported.
+ // On the client, this could happen due to including `node.concrete` in the child list.
+ return;
+ }
+ seenEver.add(node);
+ let children = node.children || [];
+ if (node.concreteItems) {
+ children.push(...node.concreteItems);
+ }
+ if (children.length) {
+ pathLookup.add(node);
+ currentPath.push(node);
+ await lazy.Async.yieldingForEach(children, traverse, yieldState);
+ currentPath.pop();
+ pathLookup.delete(node);
+ }
+ };
+
+ await lazy.Async.yieldingForEach(
+ records,
+ async record => {
+ if (!seenEver.has(record)) {
+ await traverse(record);
+ }
+ },
+ yieldState
+ );
+
+ return cycles;
+}
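+
+// An illustrative sketch: two records that contain each other are reported
+// as a single cycle, with members listed in traversal order:
+//   let a = { id: "A" };
+//   let b = { id: "B", children: [a] };
+//   a.children = [b];
+//   await detectCycles([a, b]); // [["A", "B"]]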
+
+class ServerRecordInspection {
+ constructor() {
+ this.serverRecords = null;
+ this.liveRecords = [];
+
+ this.folders = [];
+
+ this.root = null;
+
+ this.idToRecord = new Map();
+
+ this.deletedIds = new Set();
+ this.deletedRecords = [];
+
+ this.problemData = new BookmarkProblemData();
+
+ // These are handled outside of problemData
+ this._orphans = new Map();
+ this._multipleParents = new Map();
+
+ this.yieldState = lazy.Async.yieldState();
+ }
+
+ static async create(records) {
+ return new ServerRecordInspection().performInspection(records);
+ }
+
+ async performInspection(records) {
+ await this._setRecords(records);
+ await this._linkParentIDs();
+ await this._linkChildren();
+ await this._findOrphans();
+ await this._finish();
+ return this;
+ }
+
+ // We don't set orphans in this.problemData. Instead, we walk the tree at the
+ // end to find unreachable items.
+ _noteOrphan(id, parentId = undefined) {
+ // This probably shouldn't be called with a parentId twice, but if it
+ // happens we take the most recent one.
+ if (parentId || !this._orphans.has(id)) {
+ this._orphans.set(id, parentId);
+ }
+ }
+
+ noteParent(child, parent) {
+ let parents = this._multipleParents.get(child);
+ if (!parents) {
+ this._multipleParents.set(child, [parent]);
+ } else {
+ parents.push(parent);
+ }
+ }
+
+ noteMismatch(child, parent) {
+ let exists = this.problemData.parentChildMismatches.some(
+ match => match.child == child && match.parent == parent
+ );
+ if (!exists) {
+ this.problemData.parentChildMismatches.push({ child, parent });
+ }
+ }
+
+ // - Populates `this.deletedIds`, `this.folders`, and `this.idToRecord`
+ // - calls `_initRoot` (thus initializing `this.root`).
+ async _setRecords(records) {
+ if (this.serverRecords) {
+ // In general this class is expected to be created, have
+ // `performInspection` called, and then only read from that point on.
+ throw new Error("Bug: ServerRecordInspection can't `setRecords` twice");
+ }
+ this.serverRecords = records;
+ let rootChildren = [];
+
+ await lazy.Async.yieldingForEach(
+ this.serverRecords,
+ async record => {
+ if (!record.id) {
+ ++this.problemData.missingIDs;
+ return;
+ }
+
+ if (record.deleted) {
+ this.deletedIds.add(record.id);
+ }
+ if (this.idToRecord.has(record.id)) {
+ this.problemData.duplicates.push(record.id);
+ return;
+ }
+
+ this.idToRecord.set(record.id, record);
+
+ if (!record.deleted) {
+ this.liveRecords.push(record);
+
+ if (record.parentid == "places") {
+ rootChildren.push(record);
+ }
+ }
+
+ if (!record.children) {
+ return;
+ }
+
+ if (record.type != "folder") {
+ // Due to implementation details in engines/bookmarks.js (Livemark
+ // subclassing BookmarkFolder), Livemarks will have a children array,
+ // but it should still be empty.
+ if (!record.children.length) {
+ return;
+ }
+ // Otherwise we mark it as an error and still try to resolve the children
+ this.problemData.childrenOnNonFolder.push(record.id);
+ }
+
+ this.folders.push(record);
+
+ if (new Set(record.children).size !== record.children.length) {
+ this.problemData.duplicateChildren.push(record.id);
+ }
+
+ // After we're through with them, folder records store 3 (ugh) arrays that
+ // represent their folder information. The final fields look like:
+ //
+ // - childGUIDs: The original `children` array, which is an array of
+ // record IDs.
+ //
+ // - unfilteredChildren: Contains more or less `childGUIDs.map(id =>
+ // idToRecord.get(id))`, without the nulls for missing children. It will
+ // still have deleted, duplicate, mismatching, etc. children.
+ //
+ // - children: This is the 'cleaned' version of the child records that are
+ // safe to iterate over, etc. If there are no reported problems, it should
+ // be identical to unfilteredChildren.
+ //
+ // The last two are left alone until later `this._linkChildren`, however.
+ record.childGUIDs = record.children;
+
+ await lazy.Async.yieldingForEach(
+ record.childGUIDs,
+ id => {
+ this.noteParent(id, record.id);
+ },
+ this.yieldState
+ );
+
+ record.children = [];
+ },
+ this.yieldState
+ );
+
+ // Finish up some parts we can easily do now that we have idToRecord.
+ this.deletedRecords = Array.from(this.deletedIds, id =>
+ this.idToRecord.get(id)
+ );
+
+ this._initRoot(rootChildren);
+ }
+
+ _initRoot(rootChildren) {
+ let serverRoot = this.idToRecord.get("places");
+ if (serverRoot) {
+ this.root = serverRoot;
+ this.problemData.rootOnServer = true;
+ return;
+ }
+
+ // Fabricate a root. We want to be able to remember that it's fake, but
+ // would like to avoid it needing too much special casing, so we come up
+ // with children for it too (we just get these while we're iterating over
+ // the records to avoid needing two passes over a potentially large number
+ // of records).
+
+ this.root = {
+ id: "places",
+ fake: true,
+ children: rootChildren,
+ childGUIDs: rootChildren.map(record => record.id),
+ type: "folder",
+ title: "",
+ };
+ this.liveRecords.push(this.root);
+ this.idToRecord.set("places", this.root);
+ }
+
+ // Adds `parent` to all records it can that have `parentid`
+ async _linkParentIDs() {
+ await lazy.Async.yieldingForEach(
+ this.idToRecord,
+ ([id, record]) => {
+ if (record == this.root || record.deleted) {
+ return false;
+ }
+
+ // Check and update our orphan map.
+ let parentID = record.parentid;
+ let parent = this.idToRecord.get(parentID);
+ if (!parentID || !parent) {
+ this._noteOrphan(id, parentID);
+ return false;
+ }
+
+ record.parent = parent;
+
+ if (parent.deleted) {
+ this.problemData.deletedParents.push(id);
+ return true;
+ } else if (parent.type != "folder") {
+ this.problemData.parentNotFolder.push(record.id);
+ return true;
+ }
+
+ if (parent.id !== "place" || this.problemData.rootOnServer) {
+ if (!parent.childGUIDs.includes(record.id)) {
+ this.noteMismatch(record.id, parent.id);
+ }
+ }
+
+ if (parent.deleted && !record.deleted) {
+ this.problemData.deletedParents.push(record.id);
+ }
+
+ // Note: We used to check if the parentName on the server matches the
+ // actual local parent name, but given this is used only for de-duping a
+ // record the first time it is seen and expensive to keep up-to-date, we
+ // decided to just stop recording it. See bug 1276969 for more.
+ return false;
+ },
+ this.yieldState
+ );
+ }
+
+ // Build the children and unfilteredChildren arrays (which contain record
+ // objects, not ids)
+ async _linkChildren() {
+ // Check that we aren't missing any children.
+ await lazy.Async.yieldingForEach(
+ this.folders,
+ async folder => {
+ folder.children = [];
+ folder.unfilteredChildren = [];
+
+ let idsThisFolder = new Set();
+
+ await lazy.Async.yieldingForEach(
+ folder.childGUIDs,
+ childID => {
+ let child = this.idToRecord.get(childID);
+
+ if (!child) {
+ this.problemData.missingChildren.push({
+ parent: folder.id,
+ child: childID,
+ });
+ return;
+ }
+
+ // Keep every resolvable child, even deleted/duplicate/mismatched ones,
+ // to match the `unfilteredChildren` description in `_setRecords`.
+ folder.unfilteredChildren.push(child);
+
+ if (child.deleted) {
+ this.problemData.deletedChildren.push({
+ parent: folder.id,
+ child: childID,
+ });
+ return;
+ }
+
+ if (child.parentid != folder.id) {
+ this.noteMismatch(childID, folder.id);
+ return;
+ }
+
+ if (idsThisFolder.has(childID)) {
+ // Already recorded earlier, we just don't want to mess up `children`
+ return;
+ }
+ idsThisFolder.add(childID);
+ folder.children.push(child);
+ },
+ this.yieldState
+ );
+ },
+ this.yieldState
+ );
+ }
+
+ // Finds the orphans in the tree using something similar to a `mark and sweep`
+ // strategy. That is, we iterate over the children from the root, remembering
+ // which items we've seen. Then, we iterate all items, and know the ones we
+ // haven't seen are orphans.
+ async _findOrphans() {
+ let seen = new Set([this.root.id]);
+
+ const inCycle = await lazy.Async.yieldingForEach(
+ Utils.walkTree(this.root),
+ ([node]) => {
+ if (seen.has(node.id)) {
+ // We're in an infinite loop due to a cycle.
+ // Return early to avoid reporting false positives for orphans.
+ return true;
+ }
+ seen.add(node.id);
+
+ return false;
+ },
+ this.yieldState
+ );
+
+ if (inCycle) {
+ return;
+ }
+
+ await lazy.Async.yieldingForEach(
+ this.liveRecords,
+ (record, i) => {
+ if (!seen.has(record.id)) {
+ // We intentionally don't record the parentid here, since we only record
+ // that if the record refers to a parent that doesn't exist, which we
+ // have already handled (when linking parentid's).
+ this._noteOrphan(record.id);
+ }
+ },
+ this.yieldState
+ );
+
+ await lazy.Async.yieldingForEach(
+ this._orphans,
+ ([id, parent]) => {
+ this.problemData.orphans.push({ id, parent });
+ },
+ this.yieldState
+ );
+ }
+
+ async _finish() {
+ this.problemData.cycles = await detectCycles(this.liveRecords);
+
+ for (const [child, recordedParents] of this._multipleParents) {
+ let parents = new Set(recordedParents);
+ if (parents.size > 1) {
+ this.problemData.multipleParents.push({ child, parents: [...parents] });
+ }
+ }
+ // Dedupe simple arrays in the problem data, so that we don't have to worry
+ // about it in the code
+ const idArrayProps = [
+ "duplicates",
+ "deletedParents",
+ "childrenOnNonFolder",
+ "duplicateChildren",
+ "parentNotFolder",
+ ];
+ for (let prop of idArrayProps) {
+ this.problemData[prop] = [...new Set(this.problemData[prop])];
+ }
+ }
+}
+
+export class BookmarkValidator {
+ constructor() {
+ this.yieldState = lazy.Async.yieldState();
+ }
+
+ async canValidate() {
+ return !(await lazy.PlacesSyncUtils.bookmarks.havePendingChanges());
+ }
+
+ async _followQueries(recordsByQueryId) {
+ await lazy.Async.yieldingForEach(
+ recordsByQueryId.values(),
+ entry => {
+ if (
+ entry.type !== "query" &&
+ (!entry.bmkUri || !entry.bmkUri.startsWith(QUERY_PROTOCOL))
+ ) {
+ return;
+ }
+ let params = new URLSearchParams(
+ entry.bmkUri.slice(QUERY_PROTOCOL.length)
+ );
+ // Queries with `excludeQueries` won't form cycles because they'll
+ // exclude all queries, including themselves, from the result set.
+ let excludeQueries = params.get("excludeQueries");
+ if (excludeQueries === "1" || excludeQueries === "true") {
+ // `nsNavHistoryQuery::ParseQueryBooleanString` allows `1` and `true`.
+ return;
+ }
+ entry.concreteItems = [];
+ let queryIds = params.getAll("folder");
+ for (let queryId of queryIds) {
+ let concreteItem = recordsByQueryId.get(queryId);
+ if (concreteItem) {
+ entry.concreteItems.push(concreteItem);
+ }
+ }
+ },
+ this.yieldState
+ );
+ }
+
+ async createClientRecordsFromTree(clientTree) {
+ // Iterate over the treeNode, converting it to something more similar to what
+ // the server stores.
+ let records = [];
+ // A map of local IDs and well-known query folder names to records. Unlike
+ // GUIDs, local IDs aren't synced, since they're not stable across devices.
+ // New Places APIs use GUIDs to refer to bookmarks, but the legacy APIs
+ // still use local IDs. We use this mapping to parse `place:` queries that
+ // refer to folders via their local IDs.
+ let recordsByQueryId = new Map();
+ let syncedRoots = lazy.SYNCED_ROOTS;
+
+ const traverse = async (treeNode, synced) => {
+ if (!synced) {
+ synced = syncedRoots.includes(treeNode.guid);
+ }
+ let localId = treeNode.id;
+ let guid = lazy.PlacesSyncUtils.bookmarks.guidToRecordId(treeNode.guid);
+ let itemType = "item";
+ treeNode.ignored = !synced;
+ treeNode.id = guid;
+ switch (treeNode.type) {
+ case lazy.PlacesUtils.TYPE_X_MOZ_PLACE:
+ if (treeNode.uri.startsWith(QUERY_PROTOCOL)) {
+ itemType = "query";
+ } else {
+ itemType = "bookmark";
+ }
+ break;
+ case lazy.PlacesUtils.TYPE_X_MOZ_PLACE_CONTAINER:
+ let isLivemark = false;
+ if (treeNode.annos) {
+ for (let anno of treeNode.annos) {
+ if (anno.name === lazy.PlacesUtils.LMANNO_FEEDURI) {
+ isLivemark = true;
+ treeNode.feedUri = anno.value;
+ } else if (anno.name === lazy.PlacesUtils.LMANNO_SITEURI) {
+ isLivemark = true;
+ treeNode.siteUri = anno.value;
+ }
+ }
+ }
+ itemType = isLivemark ? "livemark" : "folder";
+ break;
+ case lazy.PlacesUtils.TYPE_X_MOZ_PLACE_SEPARATOR:
+ itemType = "separator";
+ break;
+ }
+
+ if (treeNode.tags) {
+ treeNode.tags = treeNode.tags.split(",");
+ } else {
+ treeNode.tags = [];
+ }
+ treeNode.type = itemType;
+ treeNode.pos = treeNode.index;
+ treeNode.bmkUri = treeNode.uri;
+ records.push(treeNode);
+ if (treeNode.guid in lazy.ROOT_GUID_TO_QUERY_FOLDER_NAME) {
+ let queryId = lazy.ROOT_GUID_TO_QUERY_FOLDER_NAME[treeNode.guid];
+ recordsByQueryId.set(queryId, treeNode);
+ }
+ if (localId) {
+ // Always add the ID, since it's still possible for a query to
+ // reference a root without using the well-known name. For example,
+ // `place:folder=${PlacesUtils.mobileFolderId}` and
+ // `place:folder=MOBILE_BOOKMARKS` are equivalent.
+ recordsByQueryId.set(localId.toString(10), treeNode);
+ }
+ if (treeNode.type === "folder") {
+ treeNode.childGUIDs = [];
+ if (!treeNode.children) {
+ treeNode.children = [];
+ }
+
+ await lazy.Async.yieldingForEach(
+ treeNode.children,
+ async child => {
+ await traverse(child, synced);
+ child.parent = treeNode;
+ child.parentid = guid;
+ treeNode.childGUIDs.push(child.guid);
+ },
+ this.yieldState
+ );
+ }
+ };
+
+ await traverse(clientTree, false);
+
+ clientTree.id = "places";
+ await this._followQueries(recordsByQueryId);
+ return records;
+ }
+
+ /**
+ * Process the server-side list. Mainly this builds the records into a tree,
+ * but it also records information about problems, and produces arrays of the
+ * deleted and non-deleted nodes.
+ *
+ * Returns an object containing:
+ * - records: Array of non-deleted records. Each record contains the following
+ * properties
+ * - childGUIDs (array of strings, only present if type is 'folder'): the
+ * list of child GUIDs stored on the server.
+ * - children (array of records, only present if type is 'folder'):
+ * each record has these same properties. This may differ in content
+ * from what you may expect from the childGUIDs list, as it won't
+ * contain any records that could not be found.
+ * - parent (record): The parent to this record.
+ * - Unchanged properties sent down from the server: id, title, type,
+ * parentName, parentid, bmkUri, keyword, tags, pos, queryId
+ * - root: Root of the server-side bookmark tree. Has the same properties as
+ * above.
+ * - deletedRecords: As above, but only contains items that the server sent
+ * where it also sent indication that the item should be deleted.
+ * - problemData: a BookmarkProblemData object, with the caveat that
+ * the fields describing client/server relationship will not have been filled
+ * out yet.
+ */
+ async inspectServerRecords(serverRecords) {
+ const data = await ServerRecordInspection.create(serverRecords);
+ return {
+ deletedRecords: data.deletedRecords,
+ records: data.liveRecords,
+ problemData: data.problemData,
+ root: data.root,
+ };
+ }
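+
+ // A minimal usage sketch (illustrative record; unless the server sent a
+ // "places" record, the returned root is fabricated client-side):
+ //   let validator = new BookmarkValidator();
+ //   let { root, problemData } = await validator.inspectServerRecords([
+ //     { id: "menu", parentid: "places", type: "folder", children: [] },
+ //   ]);
+ //   // root.fake == true, problemData.rootOnServer == false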
+
+ // Perform client-side sanity checking that doesn't involve server data
+ async _validateClient(problemData, clientRecords) {
+ problemData.clientCycles = await detectCycles(clientRecords);
+ for (let rootGUID of lazy.SYNCED_ROOTS) {
+ let record = clientRecords.find(record => record.guid === rootGUID);
+ if (!record || record.parentid !== "places") {
+ problemData.badClientRoots.push(rootGUID);
+ }
+ }
+ }
+
+ async _computeUnifiedRecordMap(serverRecords, clientRecords) {
+ let allRecords = new Map();
+ await lazy.Async.yieldingForEach(
+ serverRecords,
+ sr => {
+ if (sr.fake) {
+ return;
+ }
+ allRecords.set(sr.id, { client: null, server: sr });
+ },
+ this.yieldState
+ );
+
+ await lazy.Async.yieldingForEach(
+ clientRecords,
+ cr => {
+ let unified = allRecords.get(cr.id);
+ if (!unified) {
+ allRecords.set(cr.id, { client: cr, server: null });
+ } else {
+ unified.client = cr;
+ }
+ },
+ this.yieldState
+ );
+
+ return allRecords;
+ }
+
+ _recordMissing(problems, id, clientRecord, serverRecord, serverTombstones) {
+ if (!clientRecord && serverRecord) {
+ problems.clientMissing.push(id);
+ }
+ if (!serverRecord && clientRecord) {
+ if (serverTombstones.has(id)) {
+ problems.serverDeleted.push(id);
+ } else if (!clientRecord.ignored && clientRecord.id != "places") {
+ problems.serverMissing.push(id);
+ }
+ }
+ }
+
+ _compareRecords(client, server) {
+ let structuralDifferences = [];
+ let differences = [];
+
+ // Don't bother comparing titles of roots. It's okay if locally it's
+ // "Mobile Bookmarks", but the server thinks it's "mobile".
+ // TODO: We probably should be handling other localized bookmarks (e.g.
+ // default bookmarks) here as well, see bug 1316041.
+ if (!lazy.SYNCED_ROOTS.includes(client.guid)) {
+ // We want to treat undefined, null and an empty string as identical
+ if ((client.title || "") !== (server.title || "")) {
+ differences.push("title");
+ }
+ }
+
+ if (client.parentid || server.parentid) {
+ if (client.parentid !== server.parentid) {
+ structuralDifferences.push("parentid");
+ }
+ }
+
+ if (client.tags || server.tags) {
+ let cl = client.tags ? [...client.tags].sort() : [];
+ let sl = server.tags ? [...server.tags].sort() : [];
+ if (!CommonUtils.arrayEqual(cl, sl)) {
+ differences.push("tags");
+ }
+ }
+
+ let sameType = client.type === server.type;
+ if (!sameType) {
+ if (
+ server.type === "query" &&
+ client.type === "bookmark" &&
+ client.bmkUri.startsWith(QUERY_PROTOCOL)
+ ) {
+ sameType = true;
+ }
+ }
+
+ if (!sameType) {
+ differences.push("type");
+ } else {
+ switch (server.type) {
+ case "bookmark":
+ case "query":
+ if (!areURLsEqual(server.bmkUri, client.bmkUri)) {
+ differences.push("bmkUri");
+ }
+ break;
+ case "separator":
+ if (server.pos != client.pos) {
+ differences.push("pos");
+ }
+ break;
+ case "livemark":
+ if (server.feedUri != client.feedUri) {
+ differences.push("feedUri");
+ }
+ if (server.siteUri != client.siteUri) {
+ differences.push("siteUri");
+ }
+ break;
+ case "folder":
+ if (server.id === "places" && server.fake) {
+ // It's the fabricated places root. It won't have the GUIDs, but
+ // it doesn't matter.
+ break;
+ }
+ if (client.childGUIDs || server.childGUIDs) {
+ let cl = client.childGUIDs || [];
+ let sl = server.childGUIDs || [];
+ if (!CommonUtils.arrayEqual(cl, sl)) {
+ structuralDifferences.push("childGUIDs");
+ }
+ }
+ break;
+ }
+ }
+ return { differences, structuralDifferences };
+ }
+
+ /**
+ * Compare the list of server records with the client tree.
+ *
+ * Returns the same data as described in the inspectServerRecords comment,
+ * with the following additional fields.
+ * - clientRecords: an array of client records in a similar format to
+ * the .records (i.e., server records) entry.
+ * - problemData is the same as for inspectServerRecords, except all properties
+ * will be filled out.
+ */
+ async compareServerWithClient(serverRecords, clientTree) {
+ let clientRecords = await this.createClientRecordsFromTree(clientTree);
+ let inspectionInfo = await this.inspectServerRecords(serverRecords);
+ inspectionInfo.clientRecords = clientRecords;
+
+ // Mainly do this to remove deleted items and normalize child guids.
+ serverRecords = inspectionInfo.records;
+ let problemData = inspectionInfo.problemData;
+
+ await this._validateClient(problemData, clientRecords);
+
+ let allRecords = await this._computeUnifiedRecordMap(
+ serverRecords,
+ clientRecords
+ );
+
+ let serverDeleted = new Set(inspectionInfo.deletedRecords.map(r => r.id));
+
+ await lazy.Async.yieldingForEach(
+ allRecords,
+ ([id, { client, server }]) => {
+ if (!client || !server) {
+ this._recordMissing(problemData, id, client, server, serverDeleted);
+ return;
+ }
+ if (server && client && client.ignored) {
+ problemData.serverUnexpected.push(id);
+ }
+ let { differences, structuralDifferences } = this._compareRecords(
+ client,
+ server
+ );
+
+ if (differences.length) {
+ problemData.differences.push({ id, differences });
+ }
+ if (structuralDifferences.length) {
+ problemData.structuralDifferences.push({
+ id,
+ differences: structuralDifferences,
+ });
+ }
+ },
+ this.yieldState
+ );
+
+ return inspectionInfo;
+ }
+
+ async _getServerState(engine) {
+ let collection = engine.itemSource();
+ let collectionKey = engine.service.collectionKeys.keyForCollection(
+ engine.name
+ );
+ collection.full = true;
+ let result = await collection.getBatched();
+ if (!result.response.success) {
+ throw result.response;
+ }
+ let cleartexts = [];
+ await lazy.Async.yieldingForEach(
+ result.records,
+ async record => {
+ await record.decrypt(collectionKey);
+ cleartexts.push(record.cleartext);
+ },
+ this.yieldState
+ );
+ return cleartexts;
+ }
+
+ async validate(engine) {
+ let start = Date.now();
+ let clientTree = await lazy.PlacesUtils.promiseBookmarksTree("", {
+ includeItemIds: true,
+ });
+ let serverState = await this._getServerState(engine);
+ let serverRecordCount = serverState.length;
+ let result = await this.compareServerWithClient(serverState, clientTree);
+ let end = Date.now();
+ let duration = end - start;
+
+ engine._log.debug(`Validated bookmarks in ${duration}ms`);
+ engine._log.debug(`Problem summary`);
+ for (let { name, count } of result.problemData.getSummary()) {
+ engine._log.debug(` ${name}: ${count}`);
+ }
+
+ return {
+ duration,
+ version: this.version,
+ problems: result.problemData,
+ recordCount: serverRecordCount,
+ };
+ }
+}
+
+BookmarkValidator.prototype.version = BOOKMARK_VALIDATOR_VERSION;
diff --git a/services/sync/tps/extensions/tps/resource/modules/bookmarks.sys.mjs b/services/sync/tps/extensions/tps/resource/modules/bookmarks.sys.mjs
new file mode 100644
index 0000000000..e4aac948b5
--- /dev/null
+++ b/services/sync/tps/extensions/tps/resource/modules/bookmarks.sys.mjs
@@ -0,0 +1,833 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This is a system ES module (.sys.mjs) imported via
+ * ChromeUtils.importESModule() and acts as a singleton. Only the
+ * exported symbols are exposed on import, and only when and where imported.
+ */
+
+import { PlacesBackups } from "resource://gre/modules/PlacesBackups.sys.mjs";
+
+import { PlacesSyncUtils } from "resource://gre/modules/PlacesSyncUtils.sys.mjs";
+import { PlacesUtils } from "resource://gre/modules/PlacesUtils.sys.mjs";
+
+import { Logger } from "resource://tps/logger.sys.mjs";
+
+export async function DumpBookmarks() {
+ let [bookmarks] = await PlacesBackups.getBookmarksTree();
+ Logger.logInfo(
+ "Dumping Bookmarks...\n" + JSON.stringify(bookmarks, undefined, 2) + "\n\n"
+ );
+}
+
+/**
+ * extend: causes a child object to inherit from a parent
+ */
+function extend(child, supertype) {
+ Object.setPrototypeOf(child.prototype, supertype.prototype);
+}
+/**
+ * PlacesItemProps object, holds properties for places items
+ */
+function PlacesItemProps(props) {
+ this.location = null;
+ this.uri = null;
+ this.keyword = null;
+ this.title = null;
+ this.after = null;
+ this.before = null;
+ this.folder = null;
+ this.position = null;
+ this.delete = false;
+ this.tags = null;
+ this.last_item_pos = null;
+ this.type = null;
+
+ for (var prop in props) {
+ if (prop in this) {
+ this[prop] = props[prop];
+ }
+ }
+}
+
+/**
+ * PlacesItem object. Base class for places items.
+ */
+export function PlacesItem(props) {
+ this.props = new PlacesItemProps(props);
+ if (this.props.location == null) {
+ this.props.location = "menu";
+ }
+ if ("changes" in props) {
+ this.updateProps = new PlacesItemProps(props.changes);
+ } else {
+ this.updateProps = null;
+ }
+}
+
+/**
+ * Instance methods for generic places items.
+ */
+PlacesItem.prototype = {
+ // a map of possible root folders for places items
+ _bookmarkFolders: {
+ places: PlacesUtils.bookmarks.rootGuid,
+ menu: PlacesUtils.bookmarks.menuGuid,
+ tags: PlacesUtils.bookmarks.tagsGuid,
+ unfiled: PlacesUtils.bookmarks.unfiledGuid,
+ toolbar: PlacesUtils.bookmarks.toolbarGuid,
+ mobile: PlacesUtils.bookmarks.mobileGuid,
+ },
+
+ _typeMap: new Map([
+ [PlacesUtils.TYPE_X_MOZ_PLACE_CONTAINER, PlacesUtils.bookmarks.TYPE_FOLDER],
+ [
+ PlacesUtils.TYPE_X_MOZ_PLACE_SEPARATOR,
+ PlacesUtils.bookmarks.TYPE_SEPARATOR,
+ ],
+ [PlacesUtils.TYPE_X_MOZ_PLACE, PlacesUtils.bookmarks.TYPE_BOOKMARK],
+ ]),
+
+ toString() {
+ var that = this;
+ var props = ["uri", "title", "location", "folder"];
+ var string =
+ (this.props.type ? this.props.type + " " : "") +
+ "(" +
+ (function () {
+ var ret = [];
+ for (var i in props) {
+ if (that.props[props[i]]) {
+ ret.push(props[i] + ": " + that.props[props[i]]);
+ }
+ }
+ return ret;
+ })().join(", ") +
+ ")";
+ return string;
+ },
+
+ /**
+ * GetPlacesChildGuid
+ *
+ * Finds the guid of an item with the specified properties in the places
+ * database under the specified parent.
+ *
+ * @param folder The guid of the folder to search
+ * @param type The type of the item to find, or null to match any item;
+ * this is one of the PlacesUtils.bookmarks.TYPE_* values
+ * @param title The title of the item to find, or null to match any title
+ * @param uri The uri of the item to find, or null to match any uri
+ *
+ * @return the guid of the item if it was found, otherwise null
+ */
+ async GetPlacesChildGuid(folder, type, title, uri) {
+ let children = (await PlacesUtils.promiseBookmarksTree(folder)).children;
+ if (!children) {
+ return null;
+ }
+ let guid = null;
+ for (let node of children) {
+ if (node.title == title) {
+ let nodeType = this._typeMap.get(node.type);
+ if (type == null || type == undefined || nodeType == type) {
+ // `node.uri` and `uri` are plain spec strings here (the old `.spec`
+ // comparison was vacuous), so compare them as normalized URIs.
+ if (
+ uri == undefined ||
+ uri == null ||
+ (node.uri && Services.io.newURI(node.uri).equals(Services.io.newURI(uri)))
+ ) {
+ // Note that this is suspect as we return the *last* matching
+ // child, which some tests rely on (i.e., an early-return here causes
+ // at least 1 test to fail). But that's a yak for another day.
+ guid = node.guid;
+ }
+ }
+ }
+ }
+ return guid;
+ },
+
+ /**
+ * IsAdjacentTo
+ *
+ * Determines if this object is immediately adjacent to another.
+ *
+ * @param itemName The name of the other object; this may be any kind of
+ * places item
+ * @param relativePos The relative position of the other object. If -1,
+ * the other object should come immediately after this one; if +1,
+ * the other object should immediately precede this one
+ * @return true if this object is immediately adjacent to the other object,
+ * otherwise false
+ */
+ async IsAdjacentTo(itemName, relativePos) {
+ Logger.AssertTrue(
+ this.props.folder_id != -1 && this.props.guid != null,
+ "Either folder_id or guid was invalid"
+ );
+ let otherGuid = await this.GetPlacesChildGuid(
+ this.props.parentGuid,
+ null,
+ itemName
+ );
+ Logger.AssertTrue(otherGuid, "item " + itemName + " not found");
+ let other_pos = (await PlacesUtils.bookmarks.fetch(otherGuid)).index;
+ let this_pos = (await PlacesUtils.bookmarks.fetch(this.props.guid)).index;
+ if (other_pos + relativePos != this_pos) {
+ Logger.logPotentialError(
+ "Invalid position - " +
+ (this.props.title ? this.props.title : this.props.folder) +
+ " not " +
+ (relativePos == 1 ? "after " : "before ") +
+ itemName +
+ " for " +
+ this.toString()
+ );
+ return false;
+ }
+ return true;
+ },
+
+ /**
+ * GetItemIndex
+ *
+ * Gets the item index for this places item.
+ *
+ * @return the item index, or -1 if there's an error
+ */
+ async GetItemIndex() {
+ if (this.props.guid == null) {
+ return -1;
+ }
+ return (await PlacesUtils.bookmarks.fetch(this.props.guid)).index;
+ },
+
+ /**
+ * GetFolder
+ *
+ * Gets the folder guid for the specified bookmark folder
+ *
+ * @param location The full path of the folder, which must begin
+ * with one of the bookmark root folders
+ * @return the folder guid if the folder is found, otherwise null
+ */
+ async GetFolder(location) {
+ let folder_parts = location.split("/");
+ if (!(folder_parts[0] in this._bookmarkFolders)) {
+ return null;
+ }
+ let folderGuid = this._bookmarkFolders[folder_parts[0]];
+ for (let i = 1; i < folder_parts.length; i++) {
+ let guid = await this.GetPlacesChildGuid(
+ folderGuid,
+ PlacesUtils.bookmarks.TYPE_FOLDER,
+ folder_parts[i]
+ );
+ if (guid == null) {
+ return null;
+ }
+ folderGuid = guid;
+ }
+ return folderGuid;
+ },
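+
+ // A minimal usage sketch (hypothetical folder names; `item` is any
+ // PlacesItem instance):
+ //   let guid = await item.GetFolder("menu/folder1/subfolder");
+ //   // null unless "folder1" exists under the menu root and itself
+ //   // contains "subfolder"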
+
+ /**
+ * CreateFolder
+ *
+ * Creates a bookmark folder.
+ *
+ * @param location The full path of the folder, which must begin
+ * with one of the bookmark root folders
+ * @return the folder guid if the folder was created, otherwise -1
+ */
+ async CreateFolder(location) {
+ let folder_parts = location.split("/");
+ if (!(folder_parts[0] in this._bookmarkFolders)) {
+ return -1;
+ }
+ let folderGuid = this._bookmarkFolders[folder_parts[0]];
+ for (let i = 1; i < folder_parts.length; i++) {
+ let subfolderGuid = await this.GetPlacesChildGuid(
+ folderGuid,
+ PlacesUtils.bookmarks.TYPE_FOLDER,
+ folder_parts[i]
+ );
+ if (subfolderGuid == null) {
+ let { guid } = await PlacesUtils.bookmarks.insert({
+ parentGuid: folderGuid,
+ title: folder_parts[i],
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ });
+ folderGuid = guid;
+ } else {
+ folderGuid = subfolderGuid;
+ }
+ }
+ return folderGuid;
+ },
+
+ /**
+ * GetOrCreateFolder
+ *
+ * Locates the specified folder; if not found it is created.
+ *
+ * @param location The full path of the folder, which must begin
+ * with one of the bookmark root folders
+ * @return the folder guid if the folder was found or created, otherwise -1
+ */
+ async GetOrCreateFolder(location) {
+ let parentGuid = await this.GetFolder(location);
+ if (parentGuid == null) {
+ parentGuid = await this.CreateFolder(location);
+ }
+ return parentGuid;
+ },
+
+ /**
+ * CheckPosition
+ *
+ * Verifies the position of this places item.
+ *
+ * @param before The name of the places item that this item should be
+ * before, or null if this check should be skipped
+ * @param after The name of the places item that this item should be
+ * after, or null if this check should be skipped
+ * @param last_item_pos The index of the places item above this one,
+ * or null if this check should be skipped
+ * @return true if this item is in the correct position, otherwise false
+ */
+ async CheckPosition(before, after, last_item_pos) {
+ if (after) {
+ if (!(await this.IsAdjacentTo(after, 1))) {
+ return false;
+ }
+ }
+ if (before) {
+ if (!(await this.IsAdjacentTo(before, -1))) {
+ return false;
+ }
+ }
+ if (last_item_pos != null && last_item_pos > -1) {
+ let index = await this.GetItemIndex();
+ if (index != last_item_pos + 1) {
+ Logger.logPotentialError(
+ "Item not found at the expected index, got " +
+ index +
+ ", expected " +
+ (last_item_pos + 1) +
+ " for " +
+ this.toString()
+ );
+ return false;
+ }
+ }
+ return true;
+ },
+
+ /**
+ * SetLocation
+ *
+ * Moves this places item to a different folder.
+ *
+ * @param location The full path of the folder to which to move this
+ * places item, which must begin with one of the bookmark root
+ * folders; if null, no changes are made
+ * @return nothing if successful, otherwise an exception is thrown
+ */
+ async SetLocation(location) {
+ if (location != null) {
+ let newfolderGuid = await this.GetOrCreateFolder(location);
+ Logger.AssertTrue(
+ newfolderGuid,
+ "Location " + location + " doesn't exist; can't change item's location"
+ );
+ await PlacesUtils.bookmarks.update({
+ guid: this.props.guid,
+ parentGuid: newfolderGuid,
+ index: PlacesUtils.bookmarks.DEFAULT_INDEX,
+ });
+ this.props.parentGuid = newfolderGuid;
+ }
+ },
+
+ /**
+ * SetPosition
+ *
+ * Updates the position of this places item within this item's current
+ * folder. Use SetLocation to change folders.
+ *
+ * @param position The new index this item should be moved to; if null,
+ * no changes are made; if -1, this item is moved to the bottom of
+ * the current folder. Otherwise, must be a string which is the
+ * title of an existing item in the folder, whose current position
+ * is used as the index.
+ * @return nothing if successful, otherwise an exception is thrown
+ */
+ async SetPosition(position) {
+ if (position == null) {
+ return;
+ }
+ let index = -1;
+ if (position != -1) {
+ let existingGuid = await this.GetPlacesChildGuid(
+ this.props.parentGuid,
+ null,
+ position
+ );
+ if (existingGuid) {
+ index = (await PlacesUtils.bookmarks.fetch(existingGuid)).index;
+ }
+ Logger.AssertTrue(
+ index != -1,
+ "position " + position + " is invalid; unable to change position"
+ );
+ }
+ await PlacesUtils.bookmarks.update({ guid: this.props.guid, index });
+ },
+
+ /**
+ * Update the title of this places item
+ *
+ * @param title The new title to set for this item; if null, no changes
+ * are made
+ * @return nothing
+ */
+ async SetTitle(title) {
+ if (title != null) {
+ await PlacesUtils.bookmarks.update({ guid: this.props.guid, title });
+ }
+ },
+};
+
+/**
+ * Bookmark class constructor. Initializes instance properties.
+ */
+export function Bookmark(props) {
+ PlacesItem.call(this, props);
+ if (this.props.title == null) {
+ this.props.title = this.props.uri;
+ }
+ this.props.type = "bookmark";
+}
+
+/**
+ * Bookmark instance methods.
+ */
+Bookmark.prototype = {
+ /**
+ * SetKeyword
+ *
+ * Update this bookmark's keyword.
+ *
+ * @param keyword The keyword to set for this bookmark; if null, no
+ * changes are made
+ * @return nothing
+ */
+ async SetKeyword(keyword) {
+ if (keyword != null) {
+ // Mirror logic from PlacesSyncUtils's updateBookmarkMetadata
+ let entry = await PlacesUtils.keywords.fetch({ url: this.props.uri });
+ if (entry) {
+ await PlacesUtils.keywords.remove(entry);
+ }
+ await PlacesUtils.keywords.insert({ keyword, url: this.props.uri });
+ }
+ },
+
+ /**
+ * SetUri
+ *
+ * Updates this bookmark's URI.
+ *
+ * @param uri The new URI to set for this bookmark; if null, no changes
+ * are made
+ * @return nothing
+ */
+ async SetUri(uri) {
+ if (uri) {
+ let url = Services.io.newURI(uri);
+ await PlacesUtils.bookmarks.update({ guid: this.props.guid, url });
+ }
+ },
+
+ /**
+ * SetTags
+ *
+ * Updates this bookmark's tags.
+ *
+ * @param tags An array of tags which should be associated with this
+ * bookmark; any previous tags are removed; if this param is null,
+ * no changes are made. If this param is an empty array, all
+ * tags are removed from this bookmark.
+ * @return nothing
+ */
+ SetTags(tags) {
+ if (tags != null) {
+ let URI = Services.io.newURI(this.props.uri);
+ PlacesUtils.tagging.untagURI(URI, null);
+ if (tags.length) {
+ PlacesUtils.tagging.tagURI(URI, tags);
+ }
+ }
+ },
+
+ /**
+ * Create
+ *
+ * Creates the bookmark described by this object's properties.
+ *
+ * @return the id of the created bookmark
+ */
+ async Create() {
+ this.props.parentGuid = await this.GetOrCreateFolder(this.props.location);
+ Logger.AssertTrue(
+ this.props.parentGuid,
+ "Unable to create " +
+ "bookmark, error creating folder " +
+ this.props.location
+ );
+ let bookmarkURI = Services.io.newURI(this.props.uri);
+ let { guid } = await PlacesUtils.bookmarks.insert({
+ parentGuid: this.props.parentGuid,
+ url: bookmarkURI,
+ title: this.props.title,
+ });
+ this.props.guid = guid;
+ await this.SetKeyword(this.props.keyword);
+ await this.SetTags(this.props.tags);
+ return this.props.guid;
+ },
+
+ /**
+ * Update
+ *
+ * Updates this bookmark's properties according the properties on this
+ * object's 'updateProps' property.
+ *
+ * @return nothing
+ */
+ async Update() {
+ Logger.AssertTrue(this.props.guid, "Invalid guid during Update");
+ await this.SetTitle(this.updateProps.title);
+ await this.SetUri(this.updateProps.uri);
+ await this.SetKeyword(this.updateProps.keyword);
+ await this.SetTags(this.updateProps.tags);
+ await this.SetLocation(this.updateProps.location);
+ await this.SetPosition(this.updateProps.position);
+ },
+
+ /**
+ * Find
+ *
+ * Locates the bookmark which corresponds to this object's properties.
+ *
+ * @return the bookmark guid if the bookmark was found, otherwise null
+ */
+ async Find() {
+ this.props.parentGuid = await this.GetFolder(this.props.location);
+
+ if (this.props.parentGuid == null) {
+ Logger.logError("Unable to find folder " + this.props.location);
+ return null;
+ }
+ let bookmarkTitle = this.props.title;
+ this.props.guid = await this.GetPlacesChildGuid(
+ this.props.parentGuid,
+ null,
+ bookmarkTitle,
+ this.props.uri
+ );
+
+ if (!this.props.guid) {
+ Logger.logPotentialError(this.toString() + " not found");
+ return null;
+ }
+ if (this.props.keyword != null) {
+ let { keyword } = await PlacesSyncUtils.bookmarks.fetch(this.props.guid);
+ if (keyword != this.props.keyword) {
+ Logger.logPotentialError(
+ "Incorrect keyword - expected: " +
+ this.props.keyword +
+ ", actual: " +
+ keyword +
+ " for " +
+ this.toString()
+ );
+ return null;
+ }
+ }
+ if (this.props.tags != null) {
+ try {
+ let URI = Services.io.newURI(this.props.uri);
+ let tags = PlacesUtils.tagging.getTagsForURI(URI);
+ tags.sort();
+ this.props.tags.sort();
+ if (JSON.stringify(tags) != JSON.stringify(this.props.tags)) {
+ Logger.logPotentialError(
+ "Wrong tags - expected: " +
+ JSON.stringify(this.props.tags) +
+ ", actual: " +
+ JSON.stringify(tags) +
+ " for " +
+ this.toString()
+ );
+ return null;
+ }
+ } catch (e) {
+ Logger.logPotentialError("error processing tags " + e);
+ return null;
+ }
+ }
+ if (
+ !(await this.CheckPosition(
+ this.props.before,
+ this.props.after,
+ this.props.last_item_pos
+ ))
+ ) {
+ return null;
+ }
+ return this.props.guid;
+ },
+
+ /**
+ * Remove
+ *
+ * Removes this bookmark. The bookmark should have been located previously
+ * by a call to Find.
+ *
+ * @return nothing
+ */
+ async Remove() {
+ Logger.AssertTrue(this.props.guid, "Invalid guid during Remove");
+ await PlacesUtils.bookmarks.remove(this.props.guid);
+ },
+};
+
+extend(Bookmark, PlacesItem);
+
+/**
+ * BookmarkFolder class constructor. Initializes instance properties.
+ */
+export function BookmarkFolder(props) {
+ PlacesItem.call(this, props);
+ this.props.type = "folder";
+}
+
+/**
+ * BookmarkFolder instance methods
+ */
+BookmarkFolder.prototype = {
+ /**
+ * Create
+ *
+ * Creates the bookmark folder described by this object's properties.
+ *
+ * @return the id of the created bookmark folder
+ */
+ async Create() {
+ this.props.parentGuid = await this.GetOrCreateFolder(this.props.location);
+ Logger.AssertTrue(
+ this.props.parentGuid,
+ "Unable to create " +
+ "folder, error creating parent folder " +
+ this.props.location
+ );
+ let { guid } = await PlacesUtils.bookmarks.insert({
+ parentGuid: this.props.parentGuid,
+ title: this.props.folder,
+ index: PlacesUtils.bookmarks.DEFAULT_INDEX,
+ type: PlacesUtils.bookmarks.TYPE_FOLDER,
+ });
+ this.props.guid = guid;
+ return this.props.parentGuid;
+ },
+
+ /**
+ * Find
+ *
+ * Locates the bookmark folder which corresponds to this object's
+ * properties.
+ *
+ * @return the folder guid if the folder was found, otherwise null
+ */
+ async Find() {
+ this.props.parentGuid = await this.GetFolder(this.props.location);
+ if (this.props.parentGuid == null) {
+ Logger.logError("Unable to find folder " + this.props.location);
+ return null;
+ }
+ this.props.guid = await this.GetPlacesChildGuid(
+ this.props.parentGuid,
+ PlacesUtils.bookmarks.TYPE_FOLDER,
+ this.props.folder
+ );
+ if (this.props.guid == null) {
+ return null;
+ }
+ if (
+ !(await this.CheckPosition(
+ this.props.before,
+ this.props.after,
+ this.props.last_item_pos
+ ))
+ ) {
+ return null;
+ }
+ return this.props.guid;
+ },
+
+ /**
+ * Remove
+ *
+ * Removes this folder. The folder should have been located previously
+ * by a call to Find.
+ *
+ * @return nothing
+ */
+ async Remove() {
+ Logger.AssertTrue(this.props.guid, "Invalid guid during Remove");
+ await PlacesUtils.bookmarks.remove(this.props.guid);
+ },
+
+ /**
+ * Update
+ *
+ * Updates this bookmark's properties according the properties on this
+ * object's 'updateProps' property.
+ *
+ * @return nothing
+ */
+ async Update() {
+ Logger.AssertTrue(this.props.guid, "Invalid guid during Update");
+ await this.SetLocation(this.updateProps.location);
+ await this.SetPosition(this.updateProps.position);
+ await this.SetTitle(this.updateProps.folder);
+ },
+};
+
+extend(BookmarkFolder, PlacesItem);
+
+/**
+ * Separator class constructor. Initializes instance properties.
+ */
+export function Separator(props) {
+ PlacesItem.call(this, props);
+ this.props.type = "separator";
+}
+
+/**
+ * Separator instance methods.
+ */
+Separator.prototype = {
+ /**
+ * Create
+ *
+ * Creates the bookmark separator described by this object's properties.
+ *
+ * @return the id of the created separator
+ */
+ async Create() {
+ this.props.parentGuid = await this.GetOrCreateFolder(this.props.location);
+ Logger.AssertTrue(
+ this.props.parentGuid,
+ "Unable to create " +
+ "folder, error creating parent folder " +
+ this.props.location
+ );
+ let { guid } = await PlacesUtils.bookmarks.insert({
+ parentGuid: this.props.parentGuid,
+ type: PlacesUtils.bookmarks.TYPE_SEPARATOR,
+ });
+ this.props.guid = guid;
+ return guid;
+ },
+
+ /**
+ * Find
+ *
+ * Locates the bookmark separator which corresponds to this object's
+ * properties.
+ *
+ * @return the item guid if the separator was found, otherwise null
+ */
+ async Find() {
+ this.props.parentGuid = await this.GetFolder(this.props.location);
+ if (this.props.parentGuid == null) {
+ Logger.logError("Unable to find folder " + this.props.location);
+ return null;
+ }
+ if (this.props.before == null && this.props.last_item_pos == null) {
+ Logger.logPotentialError(
+ "Separator requires 'before' attribute if it's the" +
+ "first item in the list"
+ );
+ return null;
+ }
+ let expected_pos = -1;
+ if (this.props.before) {
+ let otherGuid = await this.GetPlacesChildGuid(
+ this.props.parentGuid,
+ null,
+ this.props.before
+ );
+ if (otherGuid == null) {
+ Logger.logPotentialError(
+ "Can't find places item " +
+ this.props.before +
+ " for locating separator"
+ );
+ return null;
+ }
+ expected_pos = (await PlacesUtils.bookmarks.fetch(otherGuid)).index - 1;
+ } else {
+ expected_pos = this.props.last_item_pos + 1;
+ }
+ // Note these are IDs instead of GUIDs.
+ let children = await PlacesSyncUtils.bookmarks.fetchChildRecordIds(
+ this.props.parentGuid
+ );
+ this.props.guid = children[expected_pos];
+ if (this.props.guid == null) {
+ Logger.logPotentialError(
+ "No separator found at position " + expected_pos
+ );
+ return null;
+ }
+ let info = await PlacesUtils.bookmarks.fetch(this.props.guid);
+ if (info.type != PlacesUtils.bookmarks.TYPE_SEPARATOR) {
+ Logger.logPotentialError(
+ "Places item at position " + expected_pos + " is not a separator"
+ );
+ return null;
+ }
+ return this.props.guid;
+ },
+
+ /**
+ * Update
+ *
+ * Updates this separator's properties according the properties on this
+ * object's 'updateProps' property.
+ *
+ * @return nothing
+ */
+ async Update() {
+ Logger.AssertTrue(this.props.guid, "Invalid guid during Update");
+ await this.SetLocation(this.updateProps.location);
+ await this.SetPosition(this.updateProps.position);
+ return true;
+ },
+
+ /**
+ * Remove
+ *
+ * Removes this separator. The separator should have been located
+ * previously by a call to Find.
+ *
+ * @return nothing
+ */
+ async Remove() {
+ Logger.AssertTrue(this.props.guid, "Invalid guid during Update");
+ await PlacesUtils.bookmarks.remove(this.props.guid);
+ },
+};
+
+extend(Separator, PlacesItem);
diff --git a/services/sync/tps/extensions/tps/resource/modules/formautofill.sys.mjs b/services/sync/tps/extensions/tps/resource/modules/formautofill.sys.mjs
new file mode 100644
index 0000000000..587d7668f4
--- /dev/null
+++ b/services/sync/tps/extensions/tps/resource/modules/formautofill.sys.mjs
@@ -0,0 +1,128 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This is a system ES module (.sys.mjs) imported via
+ * ChromeUtils.importESModule() and acts as a singleton. Only the
+ * exported symbols are exposed on import, and only when and where imported.
+ */
+
+import { Logger } from "resource://tps/logger.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ OSKeyStore: "resource://gre/modules/OSKeyStore.sys.mjs",
+ formAutofillStorage: "resource://autofill/FormAutofillStorage.sys.mjs",
+});
+
+class FormAutofillBase {
+ constructor(props, subStorageName, fields) {
+ this._subStorageName = subStorageName;
+ this._fields = fields;
+
+ this.props = {};
+ this.updateProps = null;
+ if ("changes" in props) {
+ this.updateProps = props.changes;
+ }
+ for (const field of this._fields) {
+ this.props[field] = field in props ? props[field] : null;
+ }
+ }
+
+ async getStorage() {
+ await lazy.formAutofillStorage.initialize();
+ return lazy.formAutofillStorage[this._subStorageName];
+ }
+
+ async Create() {
+ const storage = await this.getStorage();
+ await storage.add(this.props);
+ }
+
+ async Find() {
+ const storage = await this.getStorage();
+ return storage._data.find(entry =>
+ this._fields.every(field => entry[field] === this.props[field])
+ );
+ }
+
+ async Update() {
+ const storage = await this.getStorage();
+ const { guid } = await this.Find();
+ await storage.update(guid, this.updateProps, true);
+ }
+
+ async Remove() {
+ const storage = await this.getStorage();
+ const { guid } = await this.Find();
+ storage.remove(guid);
+ }
+}
+
+async function DumpStorage(subStorageName) {
+ await lazy.formAutofillStorage.initialize();
+ Logger.logInfo(`\ndumping ${subStorageName} list\n`, true);
+ const entries = lazy.formAutofillStorage[subStorageName]._data;
+ for (const entry of entries) {
+ Logger.logInfo(JSON.stringify(entry), true);
+ }
+ Logger.logInfo(`\n\nend ${subStorageName} list\n`, true);
+}
+
+const ADDRESS_FIELDS = [
+ "given-name",
+ "additional-name",
+ "family-name",
+ "organization",
+ "street-address",
+ "address-level2",
+ "address-level1",
+ "postal-code",
+ "country",
+ "tel",
+ "email",
+];
+
+export class Address extends FormAutofillBase {
+ constructor(props) {
+ super(props, "addresses", ADDRESS_FIELDS);
+ }
+}
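+
+// A minimal usage sketch (hypothetical field values):
+//
+//   let address = new Address({ "given-name": "Jane", country: "US" });
+//   await address.Create();           // adds it to formAutofillStorage
+//   let entry = await address.Find(); // undefined unless every field in
+//                                     // ADDRESS_FIELDS matches exactly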
+
+export async function DumpAddresses() {
+ await DumpStorage("addresses");
+}
+
+const CREDIT_CARD_FIELDS = [
+ "cc-name",
+ "cc-number",
+ "cc-exp-month",
+ "cc-exp-year",
+];
+
+export class CreditCard extends FormAutofillBase {
+ constructor(props) {
+ super(props, "creditCards", CREDIT_CARD_FIELDS);
+ }
+
+ async Find() {
+ const storage = await this.getStorage();
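+    // cc-number is stored encrypted (as cc-number-encrypted); decrypt each
+    // entry up front so the plain-text field comparison below can match.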
+ await Promise.all(
+ storage._data.map(
+ async entry =>
+ (entry["cc-number"] = await lazy.OSKeyStore.decrypt(
+ entry["cc-number-encrypted"]
+ ))
+ )
+ );
+ return storage._data.find(entry => {
+ return this._fields.every(field => entry[field] === this.props[field]);
+ });
+ }
+}
+
+export async function DumpCreditCards() {
+ await DumpStorage("creditCards");
+}
diff --git a/services/sync/tps/extensions/tps/resource/modules/forms.sys.mjs b/services/sync/tps/extensions/tps/resource/modules/forms.sys.mjs
new file mode 100644
index 0000000000..35b5f5c03b
--- /dev/null
+++ b/services/sync/tps/extensions/tps/resource/modules/forms.sys.mjs
@@ -0,0 +1,205 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This is an ES module to be imported via
+   ChromeUtils.importESModule() and acts as a singleton. Only the following
+   listed symbols will be exposed on import, and only when and where imported.
+ */
+
+import { Logger } from "resource://tps/logger.sys.mjs";
+
+import { FormHistory } from "resource://gre/modules/FormHistory.sys.mjs";
+
+/**
+ * FormDB
+ *
+ * Helper object containing methods to interact with the FormHistory module.
+ */
+var FormDB = {
+ async _update(data) {
+ await FormHistory.update(data);
+ },
+
+ /**
+ * insertValue
+ *
+ * Adds the specified value for the specified fieldname into form history.
+ *
+ * @param fieldname The form fieldname to insert
+ * @param value The form value to insert
+ * @param us The time, in microseconds, to use for the lastUsed
+ * and firstUsed columns
+ * @return Promise<undefined>
+ */
+ insertValue(fieldname, value, us) {
+ let data = {
+ op: "add",
+ fieldname,
+ value,
+ timesUsed: 1,
+ firstUsed: us,
+ lastUsed: us,
+ };
+ return this._update(data);
+ },
+
+ /**
+ * updateValue
+ *
+ * Updates a row in the moz_formhistory table with a new value.
+ *
+ * @param id The id of the row to update
+ * @param newvalue The new value to set
+ * @return Promise<undefined>
+ */
+ updateValue(id, newvalue) {
+ return this._update({ op: "update", guid: id, value: newvalue });
+ },
+
+ /**
+ * getDataForValue
+ *
+ * Retrieves a set of values for a row in the database that
+ * corresponds to the given fieldname and value.
+ *
+ * @param fieldname The fieldname of the row to query
+ * @param value The value of the row to query
+   * @return Promise which resolves to null if no row is found with the
+   *         specified fieldname and value, or to an object containing the
+   *         row's guid, lastUsed, and firstUsed values
+ */
+ async getDataForValue(fieldname, value) {
+ let results = await FormHistory.search(["guid", "lastUsed", "firstUsed"], {
+ fieldname,
+ value,
+ });
+ if (results.length > 1) {
+ throw new Error("more than 1 result for this query");
+ }
+    return results.length ? results[0] : null;
+ },
+
+ /**
+ * remove
+ *
+ * Removes the specified GUID from the database.
+ *
+ * @param guid The guid of the item to delete
+   * @return Promise<undefined>
+ */
+ remove(guid) {
+ return this._update({ op: "remove", guid });
+ },
+};
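+
+// A minimal usage sketch (hypothetical values); note FormHistory expects
+// microsecond timestamps for firstUsed/lastUsed:
+//
+//   const nowUs = Date.now() * 1000;
+//   await FormDB.insertValue("name", "John", nowUs);
+//   const row = await FormDB.getDataForValue("name", "John");
+//   if (row) {
+//     await FormDB.remove(row.guid);
+//   }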
+
+/**
+ * FormData class constructor
+ *
+ * Initializes instance properties.
+ */
+export function FormData(props, msSinceEpoch) {
+ this.fieldname = null;
+ this.value = null;
+ this.date = 0;
+ this.newvalue = null;
+ this.usSinceEpoch = msSinceEpoch * 1000;
+
+ for (var prop in props) {
+ if (prop in this) {
+ this[prop] = props[prop];
+ }
+ }
+}
+
+/**
+ * FormData instance methods
+ */
+FormData.prototype = {
+ /**
+ * hours_to_us
+ *
+ * Converts hours since present to microseconds since epoch.
+ *
+ * @param hours The number of hours since the present time (e.g., 0 is
+ * 'now', and -1 is 1 hour ago)
+ * @return the corresponding number of microseconds since the epoch
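+   *
+   * For example, with a run start of t microseconds since the epoch,
+   * hours_to_us(-1) returns t - 3600000000 (one hour earlier).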
+ */
+ hours_to_us(hours) {
+ return this.usSinceEpoch + hours * 60 * 60 * 1000 * 1000;
+ },
+
+ /**
+ * Create
+ *
+ * If this FormData object doesn't exist in the moz_formhistory database,
+ * add it. Throws on error.
+ *
+ * @return nothing
+ */
+ Create() {
+ Logger.AssertTrue(
+ this.fieldname != null && this.value != null,
+ "Must specify both fieldname and value"
+ );
+
+ return FormDB.getDataForValue(this.fieldname, this.value).then(formdata => {
+ if (!formdata) {
+ // this item doesn't exist yet in the db, so we need to insert it
+ return FormDB.insertValue(
+ this.fieldname,
+ this.value,
+ this.hours_to_us(this.date)
+ );
+ }
+ /* Right now, we ignore this case. If bug 552531 is ever fixed,
+ we might need to add code here to update the firstUsed or
+ lastUsed fields, as appropriate.
+ */
+ return null;
+ });
+ },
+
+ /**
+ * Find
+ *
+ * Attempts to locate an entry in the moz_formhistory database that
+ * matches the fieldname and value for this FormData object.
+ *
+ * @return true if this entry exists in the database, otherwise false
+ */
+ Find() {
+ return FormDB.getDataForValue(this.fieldname, this.value).then(formdata => {
+ let status = formdata != null;
+ if (status) {
+ /*
+ //form history dates currently not synced! bug 552531
+ let us = this.hours_to_us(this.date);
+        status = Logger.AssertTrue(
+          us >= formdata.firstUsed && us <= formdata.lastUsed,
+          "No match with that date value");
+
+ if (status)
+ */
+ this.id = formdata.guid;
+ }
+ return status;
+ });
+ },
+
+ /**
+ * Remove
+ *
+ * Removes the row represented by this FormData instance from the
+ * moz_formhistory database.
+ *
+ * @return nothing
+ */
+ async Remove() {
+ const formdata = await FormDB.getDataForValue(this.fieldname, this.value);
+ if (!formdata) {
+ return;
+ }
+ await FormDB.remove(formdata.guid);
+ },
+};
diff --git a/services/sync/tps/extensions/tps/resource/modules/history.sys.mjs b/services/sync/tps/extensions/tps/resource/modules/history.sys.mjs
new file mode 100644
index 0000000000..845bab3aa9
--- /dev/null
+++ b/services/sync/tps/extensions/tps/resource/modules/history.sys.mjs
@@ -0,0 +1,158 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This is an ES module to be imported via
+ * ChromeUtils.importESModule() and acts as a singleton. Only the following
+ * listed symbols will be exposed on import, and only when and where imported.
+ */
+
+import { PlacesUtils } from "resource://gre/modules/PlacesUtils.sys.mjs";
+
+import { PlacesSyncUtils } from "resource://gre/modules/PlacesSyncUtils.sys.mjs";
+
+import { Logger } from "resource://tps/logger.sys.mjs";
+
+export var DumpHistory = async function TPS_History__DumpHistory() {
+ let query = PlacesUtils.history.getNewQuery();
+ let options = PlacesUtils.history.getNewQueryOptions();
+ let root = PlacesUtils.history.executeQuery(query, options).root;
+ root.containerOpen = true;
+ Logger.logInfo("\n\ndumping history\n", true);
+ for (var i = 0; i < root.childCount; i++) {
+ let node = root.getChild(i);
+ let uri = node.uri;
+ let guid = await PlacesSyncUtils.history
+ .fetchGuidForURL(uri)
+ .catch(() => "?".repeat(12));
+ let curvisits = await PlacesSyncUtils.history.fetchVisitsForURL(uri);
+ for (var visit of curvisits) {
+ Logger.logInfo(
+ `GUID: ${guid}, URI: ${uri}, type=${visit.type}, date=${visit.date}`,
+ true
+ );
+ }
+ }
+ root.containerOpen = false;
+ Logger.logInfo("\nend history dump\n", true);
+};
+
+/**
+ * HistoryEntry object
+ *
+ * Contains methods for manipulating browser history entries.
+ */
+export var HistoryEntry = {
+ /**
+ * Add
+ *
+ * Adds visits for a uri to the history database. Throws on error.
+ *
+ * @param item An object representing one or more visits to a specific uri
+   * @param msSinceEpoch The number of milliseconds from Epoch to
+ * the time the current Crossweave run was started
+ * @return nothing
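+   *
+   * An item is shaped like (hypothetical values):
+   *   { uri: "http://example.com/",
+   *     title: "Example",
+   *     visits: [{ type: 1, date: -1 }] }
+   * where visit.type is a Places transition type and visit.date is hours
+   * relative to the start of the run.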
+ */
+ async Add(item, msSinceEpoch) {
+ Logger.AssertTrue(
+ "visits" in item && "uri" in item,
+ "History entry in test file must have both 'visits' " +
+ "and 'uri' properties"
+ );
+ let place = {
+ url: item.uri,
+ visits: [],
+ };
+ for (let visit of item.visits) {
+ let date = new Date(
+ Math.round(msSinceEpoch + visit.date * 60 * 60 * 1000)
+ );
+ place.visits.push({ date, transition: visit.type });
+ }
+ if ("title" in item) {
+ place.title = item.title;
+ }
+ return PlacesUtils.history.insert(place);
+ },
+
+ /**
+ * Find
+ *
+ * Finds visits for a uri to the history database. Throws on error.
+ *
+ * @param item An object representing one or more visits to a specific uri
+   * @param msSinceEpoch The number of milliseconds from Epoch to
+ * the time the current Crossweave run was started
+ * @return true if all the visits for the uri are found, otherwise false
+ */
+ async Find(item, msSinceEpoch) {
+ Logger.AssertTrue(
+ "visits" in item && "uri" in item,
+ "History entry in test file must have both 'visits' " +
+ "and 'uri' properties"
+ );
+ let curvisits = await PlacesSyncUtils.history.fetchVisitsForURL(item.uri);
+ for (let visit of curvisits) {
+ for (let itemvisit of item.visits) {
+ // Note: in microseconds.
+ let expectedDate =
+ itemvisit.date * 60 * 60 * 1000 * 1000 + msSinceEpoch * 1000;
+ if (visit.type == itemvisit.type) {
+ if (itemvisit.date === undefined || visit.date == expectedDate) {
+ itemvisit.found = true;
+ }
+ }
+ }
+ }
+
+ let all_items_found = true;
+ for (let itemvisit of item.visits) {
+ all_items_found = all_items_found && "found" in itemvisit;
+ Logger.logInfo(
+        `History entry for ${item.uri}, type: ${itemvisit.type}, date: ${itemvisit.date} ` +
+ `(${
+ itemvisit.date * 60 * 60 * 1000 * 1000
+ }), found = ${!!itemvisit.found}`
+ );
+ }
+ return all_items_found;
+ },
+
+ /**
+ * Delete
+ *
+ * Removes visits from the history database. Throws on error.
+ *
+ * @param item An object representing items to delete
+   * @param msSinceEpoch The number of milliseconds from Epoch to
+ * the time the current Crossweave run was started
+ * @return nothing
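+   *
+   * Supported item shapes (hypothetical values):
+   *   { uri: "http://example.com/" }  - remove all visits for a uri
+   *   { host: "example.com" }         - remove all visits for a host
+   *   { begin: -24, end: 0 }          - remove visits between the given
+   *                                     hours relative to the run start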
+ */
+ async Delete(item, msSinceEpoch) {
+ if ("uri" in item) {
+ let removedAny = await PlacesUtils.history.remove(item.uri);
+ if (!removedAny) {
+ Logger.log("Warning: Removed 0 history visits for uri " + item.uri);
+ }
+ } else if ("host" in item) {
+ await PlacesUtils.history.removeByFilter({ host: item.host });
+ } else if ("begin" in item && "end" in item) {
+ let filter = {
+ beginDate: new Date(msSinceEpoch + item.begin * 60 * 60 * 1000),
+ endDate: new Date(msSinceEpoch + item.end * 60 * 60 * 1000),
+ };
+ let removedAny = await PlacesUtils.history.removeVisitsByFilter(filter);
+ if (!removedAny) {
+ Logger.log(
+ "Warning: Removed 0 history visits with " +
+ JSON.stringify({ item, filter })
+ );
+ }
+ } else {
+ Logger.AssertTrue(
+ false,
+ "invalid entry in delete history " + JSON.stringify(item)
+ );
+ }
+ },
+};
diff --git a/services/sync/tps/extensions/tps/resource/modules/passwords.sys.mjs b/services/sync/tps/extensions/tps/resource/modules/passwords.sys.mjs
new file mode 100644
index 0000000000..976755e989
--- /dev/null
+++ b/services/sync/tps/extensions/tps/resource/modules/passwords.sys.mjs
@@ -0,0 +1,187 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This is an ES module to be imported via
+ * ChromeUtils.importESModule() and acts as a singleton. Only the following
+ * listed symbols will be exposed on import, and only when and where imported.
+ */
+
+import { Logger } from "resource://tps/logger.sys.mjs";
+
+var nsLoginInfo = new Components.Constructor(
+ "@mozilla.org/login-manager/loginInfo;1",
+ Ci.nsILoginInfo,
+ "init"
+);
+
+export var DumpPasswords = async function TPS__Passwords__DumpPasswords() {
+ let logins = await Services.logins.getAllLogins();
+ Logger.logInfo("\ndumping password list\n", true);
+ for (var i = 0; i < logins.length; i++) {
+ Logger.logInfo(
+ "* origin=" +
+ logins[i].origin +
+ ", formActionOrigin=" +
+ logins[i].formActionOrigin +
+ ", realm=" +
+ logins[i].httpRealm +
+ ", password=" +
+ logins[i].password +
+ ", passwordField=" +
+ logins[i].passwordField +
+ ", username=" +
+ logins[i].username +
+ ", usernameField=" +
+ logins[i].usernameField,
+ true
+ );
+ }
+ Logger.logInfo("\n\nend password list\n", true);
+};
+
+/**
+ * PasswordProps object; holds password properties.
+ */
+function PasswordProps(props) {
+ this.hostname = null;
+ this.submitURL = null;
+ this.realm = null;
+ this.username = "";
+ this.password = "";
+ this.usernameField = "";
+ this.passwordField = "";
+ this.delete = false;
+
+ for (var prop in props) {
+ if (prop in this) {
+ this[prop] = props[prop];
+ }
+ }
+}
+
+/**
+ * Password class constructor. Initializes instance properties.
+ */
+export function Password(props) {
+ this.props = new PasswordProps(props);
+ if ("changes" in props) {
+ this.updateProps = new PasswordProps(props);
+ for (var prop in props.changes) {
+ if (prop in this.updateProps) {
+ this.updateProps[prop] = props.changes[prop];
+ }
+ }
+ } else {
+ this.updateProps = null;
+ }
+}
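+
+// A minimal props sketch (hypothetical values); `changes` is applied later
+// by Update():
+//
+//   new Password({
+//     hostname: "http://www.example.com",
+//     submitURL: "http://login.example.com",
+//     username: "joe",
+//     password: "secret",
+//     usernameField: "uname",
+//     passwordField: "pword",
+//     changes: { password: "n3wp4ss" },
+//   });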
+
+/**
+ * Password instance methods.
+ */
+Password.prototype = {
+ /**
+ * Create
+ *
+ * Adds a password entry to the login manager for the password
+ * represented by this object's properties. Throws on error.
+ *
+ * @return the new login guid
+ */
+ async Create() {
+ let login = new nsLoginInfo(
+ this.props.hostname,
+ this.props.submitURL,
+ this.props.realm,
+ this.props.username,
+ this.props.password,
+ this.props.usernameField,
+ this.props.passwordField
+ );
+ await Services.logins.addLoginAsync(login);
+ login.QueryInterface(Ci.nsILoginMetaInfo);
+ return login.guid;
+ },
+
+ /**
+ * Find
+ *
+ * Finds a password entry in the login manager, for the password
+ * represented by this object's properties.
+ *
+ * @return the guid of the password if found, otherwise -1
+ */
+ async Find() {
+ let logins = await Services.logins.searchLoginsAsync({
+ origin: this.props.hostname,
+ formActionOrigin: this.props.submitURL,
+ httpRealm: this.props.realm,
+ });
+ for (var i = 0; i < logins.length; i++) {
+ if (
+ logins[i].username == this.props.username &&
+ logins[i].password == this.props.password &&
+ logins[i].usernameField == this.props.usernameField &&
+ logins[i].passwordField == this.props.passwordField
+ ) {
+ logins[i].QueryInterface(Ci.nsILoginMetaInfo);
+ return logins[i].guid;
+ }
+ }
+ return -1;
+ },
+
+ /**
+ * Update
+ *
+ * Updates an existing password entry in the login manager with
+ * new properties. Throws on error. The 'old' properties are this
+ * object's properties, the 'new' properties are the properties in
+ * this object's 'updateProps' object.
+ *
+ * @return nothing
+ */
+ Update() {
+ let oldlogin = new nsLoginInfo(
+ this.props.hostname,
+ this.props.submitURL,
+ this.props.realm,
+ this.props.username,
+ this.props.password,
+ this.props.usernameField,
+ this.props.passwordField
+ );
+ let newlogin = new nsLoginInfo(
+ this.updateProps.hostname,
+ this.updateProps.submitURL,
+ this.updateProps.realm,
+ this.updateProps.username,
+ this.updateProps.password,
+ this.updateProps.usernameField,
+ this.updateProps.passwordField
+ );
+ Services.logins.modifyLogin(oldlogin, newlogin);
+ },
+
+ /**
+ * Remove
+ *
+ * Removes an entry from the login manager for a password which
+ * matches this object's properties. Throws on error.
+ *
+ * @return nothing
+ */
+ Remove() {
+ let login = new nsLoginInfo(
+ this.props.hostname,
+ this.props.submitURL,
+ this.props.realm,
+ this.props.username,
+ this.props.password,
+ this.props.usernameField,
+ this.props.passwordField
+ );
+ Services.logins.removeLogin(login);
+ },
+};
diff --git a/services/sync/tps/extensions/tps/resource/modules/prefs.sys.mjs b/services/sync/tps/extensions/tps/resource/modules/prefs.sys.mjs
new file mode 100644
index 0000000000..c629ef4a73
--- /dev/null
+++ b/services/sync/tps/extensions/tps/resource/modules/prefs.sys.mjs
@@ -0,0 +1,122 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This is an ES module to be imported via
+   ChromeUtils.importESModule() and acts as a singleton.
+   Only the following listed symbols will be exposed on import, and only when
+   and where imported. */
+
+const WEAVE_PREF_PREFIX = "services.sync.prefs.sync.";
+
+import { Logger } from "resource://tps/logger.sys.mjs";
+
+/**
+ * Preference class constructor
+ *
+ * Initializes instance properties.
+ */
+export function Preference(props) {
+ Logger.AssertTrue(
+ "name" in props && "value" in props,
+ "Preference must have both name and value"
+ );
+
+ this.name = props.name;
+ this.value = props.value;
+}
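+
+// A usage sketch (hypothetical pref):
+//
+//   new Preference({ name: "browser.urlbar.maxRichResults", value: 20 });
+//
+// Modify() below fails the test for prefs that have no corresponding
+// services.sync.prefs.sync.<name> entry, i.e. prefs Weave doesn't sync.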
+
+/**
+ * Preference instance methods
+ */
+Preference.prototype = {
+ /**
+ * Modify
+ *
+ * Sets the value of the preference this.name to this.value.
+ * Throws on error.
+ *
+ * @return nothing
+ */
+ Modify() {
+ // Determine if this pref is actually something Weave even looks at.
+ let weavepref = WEAVE_PREF_PREFIX + this.name;
+ try {
+ let syncPref = Services.prefs.getBoolPref(weavepref);
+ if (!syncPref) {
+ Services.prefs.setBoolPref(weavepref, true);
+ }
+ } catch (e) {
+ Logger.AssertTrue(false, "Weave doesn't sync pref " + this.name);
+ }
+
+ // Modify the pref; throw an exception if the pref type is different
+ // than the value type specified in the test.
+ let prefType = Services.prefs.getPrefType(this.name);
+ switch (prefType) {
+ case Ci.nsIPrefBranch.PREF_INT:
+ Logger.AssertEqual(
+ typeof this.value,
+ "number",
+ "Wrong type used for preference value"
+ );
+ Services.prefs.setIntPref(this.name, this.value);
+ break;
+ case Ci.nsIPrefBranch.PREF_STRING:
+ Logger.AssertEqual(
+ typeof this.value,
+ "string",
+ "Wrong type used for preference value"
+ );
+ Services.prefs.setStringPref(this.name, this.value);
+ break;
+ case Ci.nsIPrefBranch.PREF_BOOL:
+ Logger.AssertEqual(
+ typeof this.value,
+ "boolean",
+ "Wrong type used for preference value"
+ );
+ Services.prefs.setBoolPref(this.name, this.value);
+ break;
+ }
+ },
+
+ /**
+ * Find
+ *
+ * Verifies that the preference this.name has the value
+ * this.value. Throws on error, or if the pref's type or value
+ * doesn't match.
+ *
+ * @return nothing
+ */
+ Find() {
+ // Read the pref value.
+ let value;
+ try {
+ let prefType = Services.prefs.getPrefType(this.name);
+ switch (prefType) {
+ case Ci.nsIPrefBranch.PREF_INT:
+ value = Services.prefs.getIntPref(this.name);
+ break;
+ case Ci.nsIPrefBranch.PREF_STRING:
+ value = Services.prefs.getStringPref(this.name);
+ break;
+ case Ci.nsIPrefBranch.PREF_BOOL:
+ value = Services.prefs.getBoolPref(this.name);
+ break;
+ }
+ } catch (e) {
+ Logger.AssertTrue(false, "Error accessing pref " + this.name);
+ }
+
+ // Throw an exception if the current and expected values aren't of
+ // the same type, or don't have the same values.
+ Logger.AssertEqual(
+ typeof value,
+ typeof this.value,
+ "Value types don't match"
+ );
+ Logger.AssertEqual(value, this.value, "Preference values don't match");
+ },
+};
diff --git a/services/sync/tps/extensions/tps/resource/modules/tabs.sys.mjs b/services/sync/tps/extensions/tps/resource/modules/tabs.sys.mjs
new file mode 100644
index 0000000000..8ea8f3b780
--- /dev/null
+++ b/services/sync/tps/extensions/tps/resource/modules/tabs.sys.mjs
@@ -0,0 +1,92 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This is an ES module to be imported via
+   ChromeUtils.importESModule() and acts as a singleton.
+   Only the following listed symbols will be exposed on import, and only when
+   and where imported. */
+
+import { Weave } from "resource://services-sync/main.sys.mjs";
+
+import { Logger } from "resource://tps/logger.sys.mjs";
+
+// Unfortunately, due to where TPS is run, we can't directly reuse the logic from
+// BrowserTestUtils.sys.mjs. Moreover, we can't resolve the URI it loads the content
+// frame script from ("chrome://mochikit/content/tests/BrowserTestUtils/content-utils.js"),
+// hence the hackiness here and in BrowserTabs.Add.
+Services.mm.loadFrameScript(
+ "data:application/javascript;charset=utf-8," +
+ encodeURIComponent(`
+ addEventListener("load", function(event) {
+ let subframe = event.target != content.document;
+ sendAsyncMessage("tps:loadEvent", {subframe: subframe, url: event.target.documentURI});
+ }, true)`),
+ true,
+ true
+);
+
+export var BrowserTabs = {
+ /**
+ * Add
+ *
+ * Opens a new tab in the current browser window for the
+ * given uri. Rejects on error.
+ *
+ * @param uri The uri to load in the new tab
+ * @return Promise
+ */
+ async Add(uri) {
+ let mainWindow = Services.wm.getMostRecentWindow("navigator:browser");
+ let browser = mainWindow.gBrowser;
+ let newtab = browser.addTrustedTab(uri);
+
+ // Wait for the tab to load.
+ await new Promise(resolve => {
+ let mm = browser.ownerGlobal.messageManager;
+ mm.addMessageListener("tps:loadEvent", function onLoad(msg) {
+ mm.removeMessageListener("tps:loadEvent", onLoad);
+ resolve();
+ });
+ });
+
+ browser.selectedTab = newtab;
+ },
+
+ /**
+ * Find
+ *
+ * Finds the specified uri and title in Weave's list of remote tabs
+ * for the specified profile.
+ *
+ * @param uri The uri of the tab to find
+ * @param title The page title of the tab to find
+ * @param profile The profile to search for tabs
+ * @return true if the specified tab could be found, otherwise false
+ */
+ async Find(uri, title, profile) {
+ // Find the uri in Weave's list of tabs for the given profile.
+ let tabEngine = Weave.Service.engineManager.get("tabs");
+ for (let client of Weave.Service.clientsEngine.remoteClients) {
+ let tabClients = await tabEngine.getAllClients();
+ let tabClient = tabClients.find(x => x.id === client.id);
+ if (!tabClient || !tabClient.tabs) {
+ continue;
+ }
+ for (let key in tabClient.tabs) {
+ let tab = tabClient.tabs[key];
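+        // urlHistory[0] is the most recent entry, i.e. the tab's current URL.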
+ let weaveTabUrl = tab.urlHistory[0];
+ if (uri == weaveTabUrl && profile == client.name) {
+ if (title == undefined || title == tab.title) {
+ return true;
+ }
+ }
+ }
+ Logger.logInfo(
+ `Dumping tabs for ${client.name}...\n` +
+ JSON.stringify(tabClient.tabs, null, 2)
+ );
+ }
+ return false;
+ },
+};
diff --git a/services/sync/tps/extensions/tps/resource/modules/windows.sys.mjs b/services/sync/tps/extensions/tps/resource/modules/windows.sys.mjs
new file mode 100644
index 0000000000..b0798b9031
--- /dev/null
+++ b/services/sync/tps/extensions/tps/resource/modules/windows.sys.mjs
@@ -0,0 +1,32 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This is an ES module to be imported via
+   ChromeUtils.importESModule() and acts as a singleton.
+   Only the following listed symbols will be exposed on import, and only when
+   and where imported. */
+
+export var BrowserWindows = {
+ /**
+ * Add
+ *
+ * Opens a new window. Throws on error.
+ *
+   * @param aPrivate Whether to open a private-browsing window.
+   * @return Promise which resolves with the new window once it has loaded
+   */
+  Add(aPrivate) {
+ return new Promise(resolve => {
+ let mainWindow = Services.wm.getMostRecentWindow("navigator:browser");
+ let win = mainWindow.OpenBrowserWindow({ private: aPrivate });
+ win.addEventListener(
+ "load",
+ function () {
+ resolve(win);
+ },
+ { once: true }
+ );
+ });
+ },
+};
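+
+// Usage sketch: `await BrowserWindows.Add(true)` opens a private-browsing
+// window and resolves with it once its load event has fired.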
diff --git a/services/sync/tps/extensions/tps/resource/quit.sys.mjs b/services/sync/tps/extensions/tps/resource/quit.sys.mjs
new file mode 100644
index 0000000000..e2cb8d8c22
--- /dev/null
+++ b/services/sync/tps/extensions/tps/resource/quit.sys.mjs
@@ -0,0 +1,38 @@
+/* -*- indent-tabs-mode: nil -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ From mozilla/toolkit/content
+ These files did not have a license
+*/
+function canQuitApplication() {
+ try {
+ var cancelQuit = Cc["@mozilla.org/supports-PRBool;1"].createInstance(
+ Ci.nsISupportsPRBool
+ );
+ Services.obs.notifyObservers(cancelQuit, "quit-application-requested");
+
+ // Something aborted the quit process.
+ if (cancelQuit.data) {
+ return false;
+ }
+ } catch (ex) {}
+
+ return true;
+}
+
+export function goQuitApplication() {
+ if (!canQuitApplication()) {
+ return false;
+ }
+
+ try {
+ Services.startup.quit(Ci.nsIAppStartup.eForceQuit);
+ } catch (ex) {
+ throw new Error(`goQuitApplication: ${ex.message}`);
+ }
+
+ return true;
+}
diff --git a/services/sync/tps/extensions/tps/resource/tps.sys.mjs b/services/sync/tps/extensions/tps/resource/tps.sys.mjs
new file mode 100644
index 0000000000..2c4a5994a6
--- /dev/null
+++ b/services/sync/tps/extensions/tps/resource/tps.sys.mjs
@@ -0,0 +1,1583 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This is an ES module to be imported via
+ * ChromeUtils.importESModule() and acts as a singleton. Only the following
+ * listed symbols will be exposed on import, and only when and where imported.
+ */
+
+import { AppConstants } from "resource://gre/modules/AppConstants.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ Addon: "resource://tps/modules/addons.sys.mjs",
+ AddonValidator: "resource://services-sync/engines/addons.sys.mjs",
+ Address: "resource://tps/modules/formautofill.sys.mjs",
+ Async: "resource://services-common/async.sys.mjs",
+ Authentication: "resource://tps/auth/fxaccounts.sys.mjs",
+ Bookmark: "resource://tps/modules/bookmarks.sys.mjs",
+ BookmarkFolder: "resource://tps/modules/bookmarks.sys.mjs",
+ BookmarkValidator: "resource://tps/modules/bookmarkValidator.sys.mjs",
+ BrowserTabs: "resource://tps/modules/tabs.sys.mjs",
+ BrowserWindows: "resource://tps/modules/windows.sys.mjs",
+ CommonUtils: "resource://services-common/utils.sys.mjs",
+ CreditCard: "resource://tps/modules/formautofill.sys.mjs",
+ DumpAddresses: "resource://tps/modules/formautofill.sys.mjs",
+ DumpBookmarks: "resource://tps/modules/bookmarks.sys.mjs",
+ DumpCreditCards: "resource://tps/modules/formautofill.sys.mjs",
+ DumpHistory: "resource://tps/modules/history.sys.mjs",
+ DumpPasswords: "resource://tps/modules/passwords.sys.mjs",
+ FileUtils: "resource://gre/modules/FileUtils.sys.mjs",
+ FormData: "resource://tps/modules/forms.sys.mjs",
+ FormValidator: "resource://services-sync/engines/forms.sys.mjs",
+ HistoryEntry: "resource://tps/modules/history.sys.mjs",
+ JsonSchema: "resource://gre/modules/JsonSchema.sys.mjs",
+ Livemark: "resource://tps/modules/bookmarks.sys.mjs",
+ Log: "resource://gre/modules/Log.sys.mjs",
+ Logger: "resource://tps/logger.sys.mjs",
+ NetUtil: "resource://gre/modules/NetUtil.sys.mjs",
+ Password: "resource://tps/modules/passwords.sys.mjs",
+ PasswordValidator: "resource://services-sync/engines/passwords.sys.mjs",
+ PlacesUtils: "resource://gre/modules/PlacesUtils.sys.mjs",
+ Preference: "resource://tps/modules/prefs.sys.mjs",
+ STATUS_OK: "resource://services-sync/constants.sys.mjs",
+ Separator: "resource://tps/modules/bookmarks.sys.mjs",
+ SessionStore: "resource:///modules/sessionstore/SessionStore.sys.mjs",
+ Svc: "resource://services-sync/util.sys.mjs",
+ SyncTelemetry: "resource://services-sync/telemetry.sys.mjs",
+ WEAVE_VERSION: "resource://services-sync/constants.sys.mjs",
+ Weave: "resource://services-sync/main.sys.mjs",
+ extensionStorageSync: "resource://gre/modules/ExtensionStorageSync.sys.mjs",
+});
+
+ChromeUtils.defineLazyGetter(lazy, "fileProtocolHandler", () => {
+ let fileHandler = Services.io.getProtocolHandler("file");
+ return fileHandler.QueryInterface(Ci.nsIFileProtocolHandler);
+});
+
+ChromeUtils.defineLazyGetter(lazy, "gTextDecoder", () => {
+ return new TextDecoder();
+});
+
+// Options for wiping data during a sync
+const SYNC_RESET_CLIENT = "resetClient";
+const SYNC_WIPE_CLIENT = "wipeClient";
+const SYNC_WIPE_REMOTE = "wipeRemote";
+
+// Actions a test can perform
+const ACTION_ADD = "add";
+const ACTION_DELETE = "delete";
+const ACTION_MODIFY = "modify";
+const ACTION_SET_ENABLED = "set-enabled";
+const ACTION_SYNC = "sync";
+const ACTION_SYNC_RESET_CLIENT = SYNC_RESET_CLIENT;
+const ACTION_SYNC_WIPE_CLIENT = SYNC_WIPE_CLIENT;
+const ACTION_SYNC_WIPE_REMOTE = SYNC_WIPE_REMOTE;
+const ACTION_VERIFY = "verify";
+const ACTION_VERIFY_NOT = "verify-not";
+
+const OBSERVER_TOPICS = [
+ "fxaccounts:onlogin",
+ "fxaccounts:onlogout",
+ "profile-before-change",
+ "weave:service:tracking-started",
+ "weave:service:tracking-stopped",
+ "weave:service:login:error",
+ "weave:service:setup-complete",
+ "weave:service:sync:finish",
+ "weave:service:sync:delayed",
+ "weave:service:sync:error",
+ "weave:service:sync:start",
+ "weave:service:resyncs-finished",
+ "places-browser-init-complete",
+];
+
+export var TPS = {
+ _currentAction: -1,
+ _currentPhase: -1,
+ _enabledEngines: null,
+ _errors: 0,
+ _isTracking: false,
+ _phaseFinished: false,
+ _phaselist: {},
+ _setupComplete: false,
+ _syncActive: false,
+ _syncCount: 0,
+ _syncsReportedViaTelemetry: 0,
+ _syncErrors: 0,
+ _syncWipeAction: null,
+ _tabsAdded: 0,
+ _tabsFinished: 0,
+ _test: null,
+ _triggeredSync: false,
+ _msSinceEpoch: 0,
+ _requestedQuit: false,
+ shouldValidateAddons: false,
+ shouldValidateBookmarks: false,
+ shouldValidatePasswords: false,
+ shouldValidateForms: false,
+ _placesInitDeferred: Promise.withResolvers(),
+ ACTIONS: [
+ ACTION_ADD,
+ ACTION_DELETE,
+ ACTION_MODIFY,
+ ACTION_SET_ENABLED,
+ ACTION_SYNC,
+ ACTION_SYNC_RESET_CLIENT,
+ ACTION_SYNC_WIPE_CLIENT,
+ ACTION_SYNC_WIPE_REMOTE,
+ ACTION_VERIFY,
+ ACTION_VERIFY_NOT,
+ ],
+
+ _init: function TPS__init() {
+ this.delayAutoSync();
+
+ OBSERVER_TOPICS.forEach(function (aTopic) {
+ Services.obs.addObserver(this, aTopic, true);
+ }, this);
+
+ // Some engines bump their score during their sync, which then causes
+ // another sync immediately (notably, prefs and addons). We don't want
+ // this to happen, and there's no obvious preference to kill it - so
+ // we do this nasty hack to ensure the global score is always zero.
+ Services.prefs.addObserver("services.sync.globalScore", () => {
+ if (lazy.Weave.Service.scheduler.globalScore != 0) {
+ lazy.Weave.Service.scheduler.globalScore = 0;
+ }
+ });
+ },
+
+ DumpError(msg, exc = null) {
+ this._errors++;
+ let errInfo;
+ if (exc) {
+ errInfo = lazy.Log.exceptionStr(exc); // includes details and stack-trace.
+ } else {
+ // always write a stack even if no error passed.
+ errInfo = lazy.Log.stackTrace(new Error());
+ }
+ lazy.Logger.logError(`[phase ${this._currentPhase}] ${msg} - ${errInfo}`);
+ this.quit();
+ },
+
+ QueryInterface: ChromeUtils.generateQI([
+ "nsIObserver",
+ "nsISupportsWeakReference",
+ ]),
+
+ observe: function TPS__observe(subject, topic, data) {
+ try {
+ lazy.Logger.logInfo("----------event observed: " + topic);
+
+ switch (topic) {
+ case "profile-before-change":
+ OBSERVER_TOPICS.forEach(function (topic) {
+ Services.obs.removeObserver(this, topic);
+ }, this);
+
+ lazy.Logger.close();
+
+ break;
+
+ case "places-browser-init-complete":
+ this._placesInitDeferred.resolve();
+ break;
+
+ case "weave:service:setup-complete":
+ this._setupComplete = true;
+
+ if (this._syncWipeAction) {
+ lazy.Weave.Svc.PrefBranch.setStringPref(
+ "firstSync",
+ this._syncWipeAction
+ );
+ this._syncWipeAction = null;
+ }
+
+ break;
+
+ case "weave:service:sync:error":
+ this._syncActive = false;
+
+ this.delayAutoSync();
+
+ // If this is the first sync error, retry...
+ if (this._syncErrors === 0) {
+ lazy.Logger.logInfo("Sync error; retrying...");
+ this._syncErrors++;
+ lazy.CommonUtils.nextTick(() => {
+ this.RunNextTestAction().catch(err => {
+ this.DumpError("RunNextTestActionFailed", err);
+ });
+ });
+ } else {
+ this._triggeredSync = false;
+ this.DumpError("Sync error; aborting test");
+ }
+
+ break;
+
+ case "weave:service:resyncs-finished":
+ this._syncActive = false;
+ this._syncErrors = 0;
+ this._triggeredSync = false;
+
+ this.delayAutoSync();
+ break;
+
+ case "weave:service:sync:start":
+ // Ensure that the sync operation has been started by TPS
+ if (!this._triggeredSync) {
+ this.DumpError(
+ "Automatic sync got triggered, which is not allowed."
+ );
+ }
+
+ this._syncActive = true;
+ break;
+
+ case "weave:service:tracking-started":
+ this._isTracking = true;
+ break;
+
+ case "weave:service:tracking-stopped":
+ this._isTracking = false;
+ break;
+
+ case "fxaccounts:onlogin":
+ // A user signed in - for TPS that always means sync - so configure
+ // that.
+ lazy.Weave.Service.configure().catch(e => {
+ this.DumpError("Configuring sync failed.", e);
+ });
+ break;
+
+ default:
+ lazy.Logger.logInfo(`unhandled event: ${topic}`);
+ }
+ } catch (e) {
+ this.DumpError("Observer failed", e);
+ }
+ },
+
+ /**
+ * Given that we cannot completely disable the automatic sync operations, we
+   * massively delay the next sync. Sync operations must only happen when
+   * called directly via TPS.Sync()!
+ */
+ delayAutoSync: function TPS_delayAutoSync() {
+ lazy.Weave.Svc.PrefBranch.setIntPref("scheduler.immediateInterval", 7200);
+ lazy.Weave.Svc.PrefBranch.setIntPref("scheduler.idleInterval", 7200);
+ lazy.Weave.Svc.PrefBranch.setIntPref("scheduler.activeInterval", 7200);
+ lazy.Weave.Svc.PrefBranch.setIntPref("syncThreshold", 10000000);
+ },
+
+ quit: function TPS__quit() {
+ lazy.Logger.logInfo("quitting");
+ this._requestedQuit = true;
+ this.goQuitApplication();
+ },
+
+ async HandleWindows(aWindow, action) {
+ lazy.Logger.logInfo(
+ "executing action " +
+ action.toUpperCase() +
+ " on window " +
+ JSON.stringify(aWindow)
+ );
+ switch (action) {
+ case ACTION_ADD:
+ await lazy.BrowserWindows.Add(aWindow.private);
+ break;
+ }
+ lazy.Logger.logPass(
+ "executing action " + action.toUpperCase() + " on windows"
+ );
+ },
+
+ async HandleTabs(tabs, action) {
+ for (let tab of tabs) {
+ lazy.Logger.logInfo(
+ "executing action " +
+ action.toUpperCase() +
+ " on tab " +
+ JSON.stringify(tab)
+ );
+ switch (action) {
+ case ACTION_ADD:
+ await lazy.BrowserTabs.Add(tab.uri);
+ break;
+ case ACTION_VERIFY:
+ lazy.Logger.AssertTrue(
+ typeof tab.profile != "undefined",
+ "profile must be defined when verifying tabs"
+ );
+ lazy.Logger.AssertTrue(
+ await lazy.BrowserTabs.Find(tab.uri, tab.title, tab.profile),
+ "error locating tab"
+ );
+ break;
+ case ACTION_VERIFY_NOT:
+ lazy.Logger.AssertTrue(
+ typeof tab.profile != "undefined",
+ "profile must be defined when verifying tabs"
+ );
+ lazy.Logger.AssertTrue(
+            !(await lazy.BrowserTabs.Find(tab.uri, tab.title, tab.profile)),
+ "tab found which was expected to be absent"
+ );
+ break;
+ default:
+ lazy.Logger.AssertTrue(false, "invalid action: " + action);
+ }
+ }
+ lazy.Logger.logPass(
+ "executing action " + action.toUpperCase() + " on tabs"
+ );
+ },
+
+ async HandlePrefs(prefs, action) {
+ for (let pref of prefs) {
+ lazy.Logger.logInfo(
+ "executing action " +
+ action.toUpperCase() +
+ " on pref " +
+ JSON.stringify(pref)
+ );
+ let preference = new lazy.Preference(pref);
+ switch (action) {
+ case ACTION_MODIFY:
+ preference.Modify();
+ break;
+ case ACTION_VERIFY:
+ preference.Find();
+ break;
+ default:
+ lazy.Logger.AssertTrue(false, "invalid action: " + action);
+ }
+ }
+ lazy.Logger.logPass(
+ "executing action " + action.toUpperCase() + " on pref"
+ );
+ },
+
+ async HandleForms(data, action) {
+ this.shouldValidateForms = true;
+ for (let datum of data) {
+ lazy.Logger.logInfo(
+ "executing action " +
+ action.toUpperCase() +
+ " on form entry " +
+ JSON.stringify(datum)
+ );
+ let formdata = new lazy.FormData(datum, this._msSinceEpoch);
+ switch (action) {
+ case ACTION_ADD:
+ await formdata.Create();
+ break;
+ case ACTION_DELETE:
+ await formdata.Remove();
+ break;
+ case ACTION_VERIFY:
+ lazy.Logger.AssertTrue(await formdata.Find(), "form data not found");
+ break;
+ case ACTION_VERIFY_NOT:
+ lazy.Logger.AssertTrue(
+ !(await formdata.Find()),
+ "form data found, but it shouldn't be present"
+ );
+ break;
+ default:
+ lazy.Logger.AssertTrue(false, "invalid action: " + action);
+ }
+ }
+ lazy.Logger.logPass(
+ "executing action " + action.toUpperCase() + " on formdata"
+ );
+ },
+
+ async HandleHistory(entries, action) {
+ try {
+ for (let entry of entries) {
+ const entryString = JSON.stringify(entry);
+ lazy.Logger.logInfo(
+ "executing action " +
+ action.toUpperCase() +
+ " on history entry " +
+ entryString
+ );
+ switch (action) {
+ case ACTION_ADD:
+ await lazy.HistoryEntry.Add(entry, this._msSinceEpoch);
+ break;
+ case ACTION_DELETE:
+ await lazy.HistoryEntry.Delete(entry, this._msSinceEpoch);
+ break;
+ case ACTION_VERIFY:
+ lazy.Logger.AssertTrue(
+ await lazy.HistoryEntry.Find(entry, this._msSinceEpoch),
+ "Uri visits not found in history database: " + entryString
+ );
+ break;
+ case ACTION_VERIFY_NOT:
+ lazy.Logger.AssertTrue(
+ !(await lazy.HistoryEntry.Find(entry, this._msSinceEpoch)),
+ "Uri visits found in history database, but they shouldn't be: " +
+ entryString
+ );
+ break;
+ default:
+ lazy.Logger.AssertTrue(false, "invalid action: " + action);
+ }
+ }
+ lazy.Logger.logPass(
+ "executing action " + action.toUpperCase() + " on history"
+ );
+ } catch (e) {
+ await lazy.DumpHistory();
+ throw e;
+ }
+ },
+
+ async HandlePasswords(passwords, action) {
+ this.shouldValidatePasswords = true;
+ try {
+ for (let password of passwords) {
+ lazy.Logger.logInfo(
+ "executing action " +
+ action.toUpperCase() +
+ " on password " +
+ JSON.stringify(password)
+ );
+ let passwordOb = new lazy.Password(password);
+ switch (action) {
+ case ACTION_ADD:
+ lazy.Logger.AssertTrue(
+              !!(await passwordOb.Create()),
+ "error adding password"
+ );
+ break;
+ case ACTION_VERIFY:
+ lazy.Logger.AssertTrue(
+ (await passwordOb.Find()) != -1,
+ "password not found"
+ );
+ break;
+ case ACTION_VERIFY_NOT:
+ lazy.Logger.AssertTrue(
+ (await passwordOb.Find()) == -1,
+ "password found, but it shouldn't exist"
+ );
+ break;
+ case ACTION_DELETE:
+ lazy.Logger.AssertTrue(
+ (await passwordOb.Find()) != -1,
+ "password not found"
+ );
+ passwordOb.Remove();
+ break;
+ case ACTION_MODIFY:
+ if (passwordOb.updateProps != null) {
+ lazy.Logger.AssertTrue(
+ (await passwordOb.Find()) != -1,
+ "password not found"
+ );
+ passwordOb.Update();
+ }
+ break;
+ default:
+ lazy.Logger.AssertTrue(false, "invalid action: " + action);
+ }
+ }
+ lazy.Logger.logPass(
+ "executing action " + action.toUpperCase() + " on passwords"
+ );
+ } catch (e) {
+ await lazy.DumpPasswords();
+ throw e;
+ }
+ },
+
+ async HandleAddons(addons, action, state) {
+ this.shouldValidateAddons = true;
+ for (let entry of addons) {
+ lazy.Logger.logInfo(
+ "executing action " +
+ action.toUpperCase() +
+ " on addon " +
+ JSON.stringify(entry)
+ );
+ let addon = new lazy.Addon(this, entry);
+ switch (action) {
+ case ACTION_ADD:
+ await addon.install();
+ break;
+ case ACTION_DELETE:
+ await addon.uninstall();
+ break;
+ case ACTION_VERIFY:
+ lazy.Logger.AssertTrue(
+ await addon.find(state),
+ "addon " + addon.id + " not found"
+ );
+ break;
+ case ACTION_VERIFY_NOT:
+ lazy.Logger.AssertFalse(
+ await addon.find(state),
+ "addon " + addon.id + " is present, but it shouldn't be"
+ );
+ break;
+ case ACTION_SET_ENABLED:
+ lazy.Logger.AssertTrue(
+ await addon.setEnabled(state),
+ "addon " + addon.id + " not found"
+ );
+ break;
+ default:
+ throw new Error("Unknown action for add-on: " + action);
+ }
+ }
+ lazy.Logger.logPass(
+ "executing action " + action.toUpperCase() + " on addons"
+ );
+ },
+
+ async HandleBookmarks(bookmarks, action) {
+ // wait for default bookmarks to be created.
+ await this._placesInitDeferred.promise;
+ this.shouldValidateBookmarks = true;
+ try {
+ let items = [];
+ for (let folder in bookmarks) {
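+        // Track the previous item's Places index so position-dependent
+        // entries (e.g. separators found via last_item_pos) can locate
+        // themselves within the folder.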
+ let last_item_pos = -1;
+ for (let bookmark of bookmarks[folder]) {
+ lazy.Logger.clearPotentialError();
+ let placesItem;
+ bookmark.location = folder;
+
+ if (last_item_pos != -1) {
+ bookmark.last_item_pos = last_item_pos;
+ }
+ let itemGuid = null;
+
+ if (action != ACTION_MODIFY && action != ACTION_DELETE) {
+ lazy.Logger.logInfo(
+ "executing action " +
+ action.toUpperCase() +
+ " on bookmark " +
+ JSON.stringify(bookmark)
+ );
+ }
+
+ if ("uri" in bookmark) {
+ placesItem = new lazy.Bookmark(bookmark);
+ } else if ("folder" in bookmark) {
+ placesItem = new lazy.BookmarkFolder(bookmark);
+ } else if ("livemark" in bookmark) {
+ placesItem = new lazy.Livemark(bookmark);
+ } else if ("separator" in bookmark) {
+ placesItem = new lazy.Separator(bookmark);
+ }
+
+ if (action == ACTION_ADD) {
+ itemGuid = await placesItem.Create();
+ } else {
+ itemGuid = await placesItem.Find();
+ if (action == ACTION_VERIFY_NOT) {
+ lazy.Logger.AssertTrue(
+ itemGuid == null,
+ "places item exists but it shouldn't: " +
+ JSON.stringify(bookmark)
+ );
+ } else {
+ lazy.Logger.AssertTrue(itemGuid, "places item not found", true);
+ }
+ }
+
+ last_item_pos = await placesItem.GetItemIndex();
+ items.push(placesItem);
+ }
+ }
+
+ if (action == ACTION_DELETE || action == ACTION_MODIFY) {
+ for (let item of items) {
+ lazy.Logger.logInfo(
+ "executing action " +
+ action.toUpperCase() +
+ " on bookmark " +
+ JSON.stringify(item)
+ );
+ switch (action) {
+ case ACTION_DELETE:
+ await item.Remove();
+ break;
+ case ACTION_MODIFY:
+ if (item.updateProps != null) {
+ await item.Update();
+ }
+ break;
+ }
+ }
+ }
+
+ lazy.Logger.logPass(
+ "executing action " + action.toUpperCase() + " on bookmarks"
+ );
+ } catch (e) {
+ await lazy.DumpBookmarks();
+ throw e;
+ }
+ },
+
+ async HandleAddresses(addresses, action) {
+ try {
+ for (let address of addresses) {
+ lazy.Logger.logInfo(
+ "executing action " +
+ action.toUpperCase() +
+ " on address " +
+ JSON.stringify(address)
+ );
+ let addressOb = new lazy.Address(address);
+ switch (action) {
+ case ACTION_ADD:
+ await addressOb.Create();
+ break;
+ case ACTION_MODIFY:
+ await addressOb.Update();
+ break;
+ case ACTION_VERIFY:
+ lazy.Logger.AssertTrue(await addressOb.Find(), "address not found");
+ break;
+ case ACTION_VERIFY_NOT:
+ lazy.Logger.AssertTrue(
+ !(await addressOb.Find()),
+ "address found, but it shouldn't exist"
+ );
+ break;
+ case ACTION_DELETE:
+ lazy.Logger.AssertTrue(await addressOb.Find(), "address not found");
+ await addressOb.Remove();
+ break;
+ default:
+ lazy.Logger.AssertTrue(false, "invalid action: " + action);
+ }
+ }
+ lazy.Logger.logPass(
+ "executing action " + action.toUpperCase() + " on addresses"
+ );
+ } catch (e) {
+ await lazy.DumpAddresses();
+ throw e;
+ }
+ },
+
+ async HandleCreditCards(creditCards, action) {
+ try {
+ for (let creditCard of creditCards) {
+ lazy.Logger.logInfo(
+ "executing action " +
+ action.toUpperCase() +
+ " on creditCard " +
+ JSON.stringify(creditCard)
+ );
+ let creditCardOb = new lazy.CreditCard(creditCard);
+ switch (action) {
+ case ACTION_ADD:
+ await creditCardOb.Create();
+ break;
+ case ACTION_MODIFY:
+ await creditCardOb.Update();
+ break;
+ case ACTION_VERIFY:
+ lazy.Logger.AssertTrue(
+ await creditCardOb.Find(),
+ "creditCard not found"
+ );
+ break;
+ case ACTION_VERIFY_NOT:
+ lazy.Logger.AssertTrue(
+ !(await creditCardOb.Find()),
+ "creditCard found, but it shouldn't exist"
+ );
+ break;
+ case ACTION_DELETE:
+ lazy.Logger.AssertTrue(
+ await creditCardOb.Find(),
+ "creditCard not found"
+ );
+ await creditCardOb.Remove();
+ break;
+ default:
+ lazy.Logger.AssertTrue(false, "invalid action: " + action);
+ }
+ }
+ lazy.Logger.logPass(
+ "executing action " + action.toUpperCase() + " on creditCards"
+ );
+ } catch (e) {
+ await lazy.DumpCreditCards();
+ throw e;
+ }
+ },
+
+ async Cleanup() {
+ try {
+ await this.WipeServer();
+ } catch (ex) {
+ lazy.Logger.logError(
+ "Failed to wipe server: " + lazy.Log.exceptionStr(ex)
+ );
+ }
+ try {
+ if (await lazy.Authentication.isLoggedIn()) {
+        // Sign out and wait for Sync to completely reset itself.
+ lazy.Logger.logInfo("signing out");
+ let waiter = this.promiseObserver("weave:service:start-over:finish");
+ await lazy.Authentication.signOut();
+ await waiter;
+ lazy.Logger.logInfo("signout complete");
+ }
+ await lazy.Authentication.deleteEmail(this.config.fx_account.username);
+ } catch (e) {
+ lazy.Logger.logError("Failed to sign out: " + lazy.Log.exceptionStr(e));
+ }
+ },
+
+ /**
+ * Use Sync's bookmark validation code to see if we've corrupted the tree.
+ */
+ async ValidateBookmarks() {
+ let getServerBookmarkState = async () => {
+ let bookmarkEngine = lazy.Weave.Service.engineManager.get("bookmarks");
+ let collection = bookmarkEngine.itemSource();
+ let collectionKey =
+ bookmarkEngine.service.collectionKeys.keyForCollection(
+ bookmarkEngine.name
+ );
+ collection.full = true;
+ let items = [];
+ let resp = await collection.get();
+ for (let json of resp.obj) {
+ let record = new collection._recordObj();
+ record.deserialize(json);
+ await record.decrypt(collectionKey);
+ items.push(record.cleartext);
+ }
+ return items;
+ };
+ let serverRecordDumpStr;
+ try {
+ lazy.Logger.logInfo("About to perform bookmark validation");
+ let clientTree = await lazy.PlacesUtils.promiseBookmarksTree("", {
+ includeItemIds: true,
+ });
+ let serverRecords = await getServerBookmarkState();
+ // We can't wait until catch to stringify this, since at that point it will have cycles.
+ serverRecordDumpStr = JSON.stringify(serverRecords);
+
+ let validator = new lazy.BookmarkValidator();
+ let { problemData } = await validator.compareServerWithClient(
+ serverRecords,
+ clientTree
+ );
+
+ for (let { name, count } of problemData.getSummary()) {
+ // Exclude mobile showing up on the server hackily so that we don't
+ // report it every time, see bug 1273234 and 1274394 for more information.
+ if (
+ name === "serverUnexpected" &&
+ problemData.serverUnexpected.includes("mobile")
+ ) {
+ --count;
+ }
+ if (count) {
+ // Log this out before we assert. This is useful in the context of TPS logs, since we
+ // can see the IDs in the test files.
+ lazy.Logger.logInfo(
+ `Validation problem: "${name}": ${JSON.stringify(
+ problemData[name]
+ )}`
+ );
+ }
+ lazy.Logger.AssertEqual(
+ count,
+ 0,
+ `Bookmark validation error of type ${name}`
+ );
+ }
+ } catch (e) {
+      // Dump the client records (should always be doable).
+      await lazy.DumpBookmarks();
+      // Dump the server records if we've gotten them already.
+ if (serverRecordDumpStr) {
+ lazy.Logger.logInfo(
+ "Server bookmark records:\n" + serverRecordDumpStr + "\n"
+ );
+ }
+ this.DumpError("Bookmark validation failed", e);
+ }
+ lazy.Logger.logInfo("Bookmark validation finished");
+ },
+
+ async ValidateCollection(engineName, ValidatorType) {
+ let serverRecordDumpStr;
+ let clientRecordDumpStr;
+ try {
+ lazy.Logger.logInfo(`About to perform validation for "${engineName}"`);
+ let engine = lazy.Weave.Service.engineManager.get(engineName);
+ let validator = new ValidatorType(engine);
+ let serverRecords = await validator.getServerItems(engine);
+ let clientRecords = await validator.getClientItems();
+ try {
+ // This substantially improves the logs for addons while not making a
+ // substantial difference for the other two
+ clientRecordDumpStr = JSON.stringify(
+ clientRecords.map(r => {
+ let res = validator.normalizeClientItem(r);
+ delete res.original; // Try and prevent cyclic references
+ return res;
+ })
+ );
+ } catch (e) {
+ // ignore the error, the dump string is just here to make debugging easier.
+ clientRecordDumpStr = "<Cyclic value>";
+ }
+ try {
+ serverRecordDumpStr = JSON.stringify(serverRecords);
+ } catch (e) {
+ // as above
+ serverRecordDumpStr = "<Cyclic value>";
+ }
+ let { problemData } = await validator.compareClientWithServer(
+ clientRecords,
+ serverRecords
+ );
+ for (let { name, count } of problemData.getSummary()) {
+ if (count) {
+ lazy.Logger.logInfo(
+ `Validation problem: "${name}": ${JSON.stringify(
+ problemData[name]
+ )}`
+ );
+ }
+ lazy.Logger.AssertEqual(
+ count,
+ 0,
+ `Validation error for "${engineName}" of type "${name}"`
+ );
+ }
+ } catch (e) {
+ // Dump the client records if possible
+ if (clientRecordDumpStr) {
+ lazy.Logger.logInfo(
+ `Client state for ${engineName}:\n${clientRecordDumpStr}\n`
+ );
+ }
+      // Dump the server records if we've gotten them already.
+ if (serverRecordDumpStr) {
+ lazy.Logger.logInfo(
+ `Server state for ${engineName}:\n${serverRecordDumpStr}\n`
+ );
+ }
+ this.DumpError(`Validation failed for ${engineName}`, e);
+ }
+ lazy.Logger.logInfo(`Validation finished for ${engineName}`);
+ },
+
+ ValidatePasswords() {
+ return this.ValidateCollection("passwords", lazy.PasswordValidator);
+ },
+
+ ValidateForms() {
+ return this.ValidateCollection("forms", lazy.FormValidator);
+ },
+
+ ValidateAddons() {
+ return this.ValidateCollection("addons", lazy.AddonValidator);
+ },
+
+ async RunNextTestAction() {
+ lazy.Logger.logInfo("Running next test action");
+ try {
+ if (this._currentAction >= this._phaselist[this._currentPhase].length) {
+ // Run necessary validations and then finish up
+ lazy.Logger.logInfo("No more actions - running validations...");
+ if (this.shouldValidateBookmarks) {
+ await this.ValidateBookmarks();
+ }
+ if (this.shouldValidatePasswords) {
+ await this.ValidatePasswords();
+ }
+ if (this.shouldValidateForms) {
+ await this.ValidateForms();
+ }
+ if (this.shouldValidateAddons) {
+ await this.ValidateAddons();
+ }
+ // Force this early so that we run the validation and detect missing pings
+ // *before* we start shutting down, since if we do it after, the python
+ // code won't notice the failure.
+ lazy.SyncTelemetry.shutdown();
+ // we're all done
+ lazy.Logger.logInfo(
+ "test phase " +
+ this._currentPhase +
+ ": " +
+ (this._errors ? "FAIL" : "PASS")
+ );
+ this._phaseFinished = true;
+ this.quit();
+ return;
+ }
+ this.seconds_since_epoch = Services.prefs.getIntPref(
+ "tps.seconds_since_epoch"
+ );
+ if (this.seconds_since_epoch) {
+ // Places dislikes it if we add visits in the future. We pretend the
+ // real time is 1 minute ago to avoid issues caused by places using a
+ // different clock than the one that set the seconds_since_epoch pref.
+ this._msSinceEpoch = (this.seconds_since_epoch - 60) * 1000;
+ } else {
+ this.DumpError("seconds-since-epoch not set");
+ return;
+ }
+
+ let phase = this._phaselist[this._currentPhase];
+ let action = phase[this._currentAction];
+ lazy.Logger.logInfo("starting action: " + action[0].name);
+ await action[0].apply(this, action.slice(1));
+
+ this._currentAction++;
+ } catch (e) {
+ if (lazy.Async.isShutdownException(e)) {
+ if (this._requestedQuit) {
+ lazy.Logger.logInfo("Sync aborted due to requested shutdown");
+ } else {
+ this.DumpError(
+ "Sync aborted due to shutdown, but we didn't request it"
+ );
+ }
+ } else {
+ this.DumpError("RunNextTestAction failed", e);
+ }
+ return;
+ }
+ await this.RunNextTestAction();
+ },
+
+ _getFileRelativeToSourceRoot(testFileURL, relativePath) {
+ let file = lazy.fileProtocolHandler.getFileFromURLSpec(testFileURL);
+    // Walk up from <root>/services/sync/tests/tps/test_foo.js:
+    //   <root>/services/sync/tests/tps
+    //   <root>/services/sync/tests
+    //   <root>/services/sync
+    //   <root>/services
+    //   <root>
+    let root = file.parent.parent.parent.parent.parent;
+ root.appendRelativePath(relativePath);
+ root.normalize();
+ return root;
+ },
+
+ _pingValidator: null,
+
+ // Default ping validator that always says the ping passes. This should be
+ // overridden unless the `testing.tps.skipPingValidation` pref is true.
+ get pingValidator() {
+ return this._pingValidator
+ ? this._pingValidator
+ : {
+ validate() {
+ lazy.Logger.logInfo(
+ "Not validating ping -- disabled by pref or failure to load schema"
+ );
+ return { valid: true, errors: [] };
+ },
+ };
+ },
+
+ // Attempt to load the sync_ping_schema.json and initialize `this.pingValidator`
+ // based on the source of the tps file. Assumes that it's at "../unit/sync_ping_schema.json"
+ // relative to the directory the tps test file (testFile) is contained in.
+ _tryLoadPingSchema(testFile) {
+ if (Services.prefs.getBoolPref("testing.tps.skipPingValidation", false)) {
+ return;
+ }
+ try {
+ let schemaFile = this._getFileRelativeToSourceRoot(
+ testFile,
+ "services/sync/tests/unit/sync_ping_schema.json"
+ );
+
+ let stream = Cc[
+ "@mozilla.org/network/file-input-stream;1"
+ ].createInstance(Ci.nsIFileInputStream);
+
+ stream.init(
+ schemaFile,
+ lazy.FileUtils.MODE_RDONLY,
+ lazy.FileUtils.PERMS_FILE,
+ 0
+ );
+
+ let bytes = lazy.NetUtil.readInputStream(stream, stream.available());
+ let schema = JSON.parse(lazy.gTextDecoder.decode(bytes));
+ lazy.Logger.logInfo("Successfully loaded schema");
+
+ this._pingValidator = new lazy.JsonSchema.Validator(schema);
+ } catch (e) {
+ this.DumpError(
+ `Failed to load ping schema relative to "${testFile}".`,
+ e
+ );
+ }
+ },
+
+ /**
+ * Runs a single test phase.
+ *
+ * This is the main entry point for each phase of a test. The TPS command
+ * line driver loads this module and calls into the function with the
+ * arguments from the command line.
+ *
+ * When a phase is executed, the file is loaded as JavaScript into the
+ * current object.
+ *
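+   * A test file defines a `phases` map plus per-phase action lists, roughly
+   * (hypothetical sketch):
+   *
+   *   var phases = { phase1: "profile1", phase2: "profile2" };
+   *   Phase("phase1", [[Bookmarks.add, bookmarks_initial], [Sync]]);
+   *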
+ * The following keys in the options argument have meaning:
+ *
+ * - ignoreUnusedEngines If true, unused engines will be unloaded from
+ * Sync. This makes output easier to parse and is
+ * useful for debugging test failures.
+ *
+ * @param file
+ * String URI of the file to open.
+ * @param phase
+ * String name of the phase to run.
+ * @param logpath
+ * String path of the log file to write to.
+ * @param options
+   * Object defining additional run-time options.
+ */
+ async RunTestPhase(file, phase, logpath, options) {
+ try {
+ let settings = options || {};
+
+ lazy.Logger.init(logpath);
+ lazy.Logger.logInfo("Sync version: " + lazy.WEAVE_VERSION);
+ lazy.Logger.logInfo("Firefox buildid: " + Services.appinfo.appBuildID);
+ lazy.Logger.logInfo("Firefox version: " + Services.appinfo.version);
+ lazy.Logger.logInfo(
+ "Firefox source revision: " +
+ (AppConstants.SOURCE_REVISION_URL || "unknown")
+ );
+ lazy.Logger.logInfo("Firefox platform: " + AppConstants.platform);
+
+ // do some sync housekeeping
+ if (lazy.Weave.Service.isLoggedIn) {
+ this.DumpError("Sync logged in on startup...profile may be dirty");
+ return;
+ }
+
+ // Wait for Sync service to become ready.
+ if (!lazy.Weave.Status.ready) {
+ this.waitForEvent("weave:service:ready");
+ }
+
+ await lazy.Weave.Service.promiseInitialized;
+
+ // We only want to do this if we modified the bookmarks this phase.
+ this.shouldValidateBookmarks = false;
+
+ // Always give Sync an extra tick to initialize. If we waited for the
+ // service:ready event, this is required to ensure all handlers have
+ // executed.
+ await lazy.Async.promiseYield();
+ await this._executeTestPhase(file, phase, settings);
+ } catch (e) {
+ this.DumpError("RunTestPhase failed", e);
+ }
+ },
+
+ /**
+ * Executes a single test phase.
+ *
+ * This is called by RunTestPhase() after the environment is validated.
+ */
+ async _executeTestPhase(file, phase, settings) {
+ try {
+ this.config = JSON.parse(Services.prefs.getStringPref("tps.config"));
+ // parse the test file
+ Services.scriptloader.loadSubScript(file, this);
+ this._currentPhase = phase;
+ // cleanup phases are in the format `cleanup-${profileName}`.
+ if (this._currentPhase.startsWith("cleanup-")) {
+ let profileToClean = this._currentPhase.slice("cleanup-".length);
+ this.phases[this._currentPhase] = profileToClean;
+ this.Phase(this._currentPhase, [[this.Cleanup]]);
+ } else {
+ // Don't bother doing this for cleanup phases.
+ this._tryLoadPingSchema(file);
+ }
+ let this_phase = this._phaselist[this._currentPhase];
+
+ if (this_phase == undefined) {
+ this.DumpError("invalid phase " + this._currentPhase);
+ return;
+ }
+
+ if (this.phases[this._currentPhase] == undefined) {
+ this.DumpError("no profile defined for phase " + this._currentPhase);
+ return;
+ }
+
+ // If we have restricted the active engines, unregister engines we don't
+ // care about.
+ if (settings.ignoreUnusedEngines && Array.isArray(this._enabledEngines)) {
+ let names = {};
+ for (let name of this._enabledEngines) {
+ names[name] = true;
+ }
+ for (let engine of lazy.Weave.Service.engineManager.getEnabled()) {
+ if (!(engine.name in names)) {
+ lazy.Logger.logInfo("Unregistering unused engine: " + engine.name);
+ await lazy.Weave.Service.engineManager.unregister(engine);
+ }
+ }
+ }
+ lazy.Logger.logInfo("Starting phase " + this._currentPhase);
+
+ lazy.Logger.logInfo(
+ "setting client.name to " + this.phases[this._currentPhase]
+ );
+ lazy.Weave.Svc.PrefBranch.setStringPref(
+ "client.name",
+ this.phases[this._currentPhase]
+ );
+
+ this._interceptSyncTelemetry();
+
+ // start processing the test actions
+ this._currentAction = 0;
+ await lazy.SessionStore.promiseAllWindowsRestored;
+ await this.RunNextTestAction();
+ } catch (e) {
+ this.DumpError("_executeTestPhase failed", e);
+ }
+ },
+
+ /**
+ * Override sync telemetry functions so that we can detect errors generating
+ * the sync ping, and count how many pings we report.
+ */
+ _interceptSyncTelemetry() {
+ let originalObserve = lazy.SyncTelemetry.observe;
+ let self = this;
+ lazy.SyncTelemetry.observe = function () {
+ try {
+ originalObserve.apply(this, arguments);
+ } catch (e) {
+ self.DumpError("Error when generating sync telemetry", e);
+ }
+ };
+ lazy.SyncTelemetry.submit = record => {
+ lazy.Logger.logInfo(
+ "Intercepted sync telemetry submission: " + JSON.stringify(record)
+ );
+ this._syncsReportedViaTelemetry +=
+ record.syncs.length + (record.discarded || 0);
+ if (record.discarded) {
+ if (record.syncs.length != lazy.SyncTelemetry.maxPayloadCount) {
+ this.DumpError(
+ "Syncs discarded from ping before maximum payload count reached"
+ );
+ }
+ }
+ // If this is the shutdown ping, check that telemetry saw all the syncs.
+ if (record.why === "shutdown") {
+ // A sync may happen outside of TPS explicitly triggering one; that's not
+ // a telemetry error, so we only complain if telemetry saw fewer syncs
+ // than we triggered.
+ if (this._syncsReportedViaTelemetry < this._syncCount) {
+ this.DumpError(
+ `Telemetry missed syncs: Saw ${this._syncsReportedViaTelemetry}, should have >= ${this._syncCount}.`
+ );
+ }
+ }
+ if (!record.syncs.length) {
+ // Note: we're overwriting submit, so this is called even for pings that
+ // may have no data (which wouldn't be submitted to telemetry and would
+ // fail validation).
+ return;
+ }
+ // Our ping may have some undefined values, which we rely on JSON stripping
+ // out as part of the ping submission - but our validator fails with them,
+ // so round-trip via JSON here to avoid that.
+ record = JSON.parse(JSON.stringify(record));
+ const result = this.pingValidator.validate(record);
+ if (!result.valid) {
+ // Note that we already logged the record.
+ this.DumpError(
+ "Sync ping validation failed with errors: " +
+ JSON.stringify(result.errors)
+ );
+ }
+ };
+ },
+
+ /**
+ * Register a single phase with the test harness.
+ *
+ * This is called when loading individual test files.
+ *
+ * @param phasename
+ * String name of the phase being loaded.
+ * @param fnlist
+ * Array of functions/actions to perform.
+ */
+ Phase: function Test__Phase(phasename, fnlist) {
+ if (Object.keys(this._phaselist).length === 0) {
+ // This is the first phase, so force a login.
+ fnlist.unshift([this.Login]);
+ }
+ this._phaselist[phasename] = fnlist;
+ },
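+
+ // Illustrative test-file usage (phase and profile names hypothetical):
+ //   var phases = { phase1: "profile1", phase2: "profile2" };
+ //   Phase("phase1", [[Bookmarks.add, bookmarks_initial], [Sync]]);
+ //   Phase("phase2", [[Sync], [Bookmarks.verify, bookmarks_initial]]);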
+
+ /**
+ * Restrict enabled Sync engines to a specified set.
+ *
+ * This can be called by a test to limit what engines are enabled. It is
+ * recommended to call it to reduce the overhead and log clutter for the
+ * test.
+ *
+ * The "clients" engine is special and is always enabled, so there is no
+ * need to specify it.
+ *
+ * @param names
+ * Array of Strings for engines to make active during the test.
+ */
+ EnableEngines: function EnableEngines(names) {
+ if (!Array.isArray(names)) {
+ throw new Error(
+ "Argument to RestrictEngines() is not an array: " + typeof names
+ );
+ }
+
+ this._enabledEngines = names;
+ },
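+
+ // e.g. (engine names illustrative):
+ //   EnableEngines(["bookmarks", "history", "passwords"]);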
+
+ /**
+ * Returns a promise that resolves when a specific observer notification
+ * fires. This is similar to the various waitFor* functions, although it is
+ * typically safer if you need to do some other work that may make the event
+ * fire.
+ *
+ * e.g.:
+ * doSomething(); // causes the event to be fired.
+ * await promiseObserver("something");
+ * is risky, as the call to doSomething may fire the event before the
+ * promiseObserver call is made. Contrast with:
+ *
+ * let waiter = promiseObserver("something");
+ * doSomething(); // causes the event to be fired.
+ * await waiter; // resolves as soon as the event fires, even if it fired
+ * // before the await was reached.
+ *
+ * @param aEventName
+ * String event to wait for.
+ */
+ promiseObserver(aEventName) {
+ return new Promise(resolve => {
+ lazy.Logger.logInfo("Setting up wait for " + aEventName + "...");
+ let handler = () => {
+ lazy.Logger.logInfo("Observed " + aEventName);
+ lazy.Svc.Obs.remove(aEventName, handler);
+ resolve();
+ };
+ lazy.Svc.Obs.add(aEventName, handler);
+ });
+ },
+
+ /**
+ * Wait for the named event to be observed.
+ *
+ * Note that in general, you should probably use promiseObserver unless you
+ * are 100% sure that the event being waited on can only be sent after this
+ * call adds the listener.
+ *
+ * @param aEventName
+ * String event to wait for.
+ */
+ async waitForEvent(aEventName) {
+ await this.promiseObserver(aEventName);
+ },
+
+ /**
+ * Waits for Sync to be logged in before returning.
+ */
+ async waitForSetupComplete() {
+ if (!this._setupComplete) {
+ await this.waitForEvent("weave:service:setup-complete");
+ }
+ },
+
+ /**
+ * Waits for Sync to be finished before returning
+ */
+ async waitForSyncFinished() {
+ if (lazy.Weave.Service.locked) {
+ await this.waitForEvent("weave:service:resyncs-finished");
+ }
+ },
+
+ /**
+ * Waits for Sync to start tracking before returning.
+ */
+ async waitForTracking() {
+ if (!this._isTracking) {
+ await this.waitForEvent("weave:service:tracking-started");
+ }
+ },
+
+ /**
+ * Login on the server
+ */
+ async Login() {
+ if (await lazy.Authentication.isReady()) {
+ return;
+ }
+
+ lazy.Logger.logInfo("Setting client credentials and login.");
+ await lazy.Authentication.signIn(this.config.fx_account);
+ await this.waitForSetupComplete();
+ lazy.Logger.AssertEqual(
+ lazy.Weave.Status.service,
+ lazy.STATUS_OK,
+ "Weave status OK"
+ );
+ await this.waitForTracking();
+ },
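+
+ // The credentials come from the "tps.config" pref parsed in
+ // _executeTestPhase; shape illustrative, not authoritative:
+ //   { "fx_account": { "username": "...", "password": "..." } }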
+
+ /**
+ * Triggers a sync operation
+ *
+ * @param {String} [wipeAction]
+ * Type of wipe to perform (resetClient, wipeClient, wipeRemote)
+ *
+ */
+ async Sync(wipeAction) {
+ if (this._syncActive) {
+ this.DumpError("Sync currently active which should be impossible");
+ return;
+ }
+ lazy.Logger.logInfo(
+ "Executing Sync" + (wipeAction ? ": " + wipeAction : "")
+ );
+
+ // Force a wipe action if requested. In case of an initial sync the pref
+ // will be overwritten by Sync itself (see bug 992198), so ensure that we
+ // also handle it via the "weave:service:setup-complete" notification.
+ if (wipeAction) {
+ this._syncWipeAction = wipeAction;
+ lazy.Weave.Svc.PrefBranch.setStringPref("firstSync", wipeAction);
+ } else {
+ lazy.Weave.Svc.PrefBranch.clearUserPref("firstSync");
+ }
+ if (!(await lazy.Weave.Service.login())) {
+ // We need to complete verification.
+ lazy.Logger.logInfo("Logging in before performing sync");
+ await this.Login();
+ }
+ ++this._syncCount;
+
+ this._triggeredSync = true;
+ await lazy.Weave.Service.sync();
+ lazy.Logger.logInfo("Sync is complete");
+ // wait a second for things to settle...
+ await new Promise(resolve => {
+ lazy.CommonUtils.namedTimer(resolve, 1000, this, "postsync");
+ });
+ },
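+
+ // Illustrative usage from a test phase:
+ //   [Sync]                 // regular sync
+ //   [Sync, "wipeRemote"]   // sync forcing a wipe action (see wipeAction above)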
+
+ async WipeServer() {
+ lazy.Logger.logInfo("Wiping data from server.");
+
+ await this.Login();
+ await lazy.Weave.Service.login();
+ await lazy.Weave.Service.wipeServer();
+ },
+
+ /**
+ * Action which ensures changes are being tracked before returning.
+ */
+ async EnsureTracking() {
+ await this.Login();
+ await this.waitForTracking();
+ },
+
+ Addons: {
+ async install(addons) {
+ await TPS.HandleAddons(addons, ACTION_ADD);
+ },
+ async setEnabled(addons, state) {
+ await TPS.HandleAddons(addons, ACTION_SET_ENABLED, state);
+ },
+ async uninstall(addons) {
+ await TPS.HandleAddons(addons, ACTION_DELETE);
+ },
+ async verify(addons, state) {
+ await TPS.HandleAddons(addons, ACTION_VERIFY, state);
+ },
+ async verifyNot(addons) {
+ await TPS.HandleAddons(addons, ACTION_VERIFY_NOT);
+ },
+ skipValidation() {
+ TPS.shouldValidateAddons = false;
+ },
+ },
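+
+ // Illustrative usage (xpi name hypothetical; STATE_ENABLED is a harness
+ // constant assumed to be defined elsewhere in this module):
+ //   Phase("phase1", [[Addons.install, ["restartless.xpi"]], [Sync]]);
+ //   Phase("phase2", [[Addons.verify, ["restartless.xpi"], STATE_ENABLED]]);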
+
+ Addresses: {
+ async add(addresses) {
+ await TPS.HandleAddresses(addresses, ACTION_ADD);
+ },
+ async modify(addresses) {
+ await TPS.HandleAddresses(addresses, ACTION_MODIFY);
+ },
+ async delete(addresses) {
+ await TPS.HandleAddresses(addresses, ACTION_DELETE);
+ },
+ async verify(addresses) {
+ await TPS.HandleAddresses(addresses, ACTION_VERIFY);
+ },
+ async verifyNot(addresses) {
+ await TPS.HandleAddresses(addresses, ACTION_VERIFY_NOT);
+ },
+ },
+
+ Bookmarks: {
+ async add(bookmarks) {
+ await TPS.HandleBookmarks(bookmarks, ACTION_ADD);
+ },
+ async modify(bookmarks) {
+ await TPS.HandleBookmarks(bookmarks, ACTION_MODIFY);
+ },
+ async delete(bookmarks) {
+ await TPS.HandleBookmarks(bookmarks, ACTION_DELETE);
+ },
+ async verify(bookmarks) {
+ await TPS.HandleBookmarks(bookmarks, ACTION_VERIFY);
+ },
+ async verifyNot(bookmarks) {
+ await TPS.HandleBookmarks(bookmarks, ACTION_VERIFY_NOT);
+ },
+ skipValidation() {
+ TPS.shouldValidateBookmarks = false;
+ },
+ },
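+
+ // Illustrative usage (bookmark data shape abbreviated/hypothetical):
+ //   var bookmarks_initial = {
+ //     menu: [{ uri: "http://example.com/", title: "Example" }],
+ //   };
+ //   Phase("phase1", [[Bookmarks.add, bookmarks_initial], [Sync]]);
+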
+ CreditCards: {
+ async add(creditCards) {
+ await TPS.HandleCreditCards(creditCards, ACTION_ADD);
+ },
+ async modify(creditCards) {
+ await TPS.HandleCreditCards(creditCards, ACTION_MODIFY);
+ },
+ async delete(creditCards) {
+ await TPS.HandleCreditCards(creditCards, ACTION_DELETE);
+ },
+ async verify(creditCards) {
+ await TPS.HandleCreditCards(creditCards, ACTION_VERIFY);
+ },
+ async verifyNot(creditCards) {
+ await TPS.HandleCreditCards(creditCards, ACTION_VERIFY_NOT);
+ },
+ },
+
+ Formdata: {
+ async add(formdata) {
+ await TPS.HandleForms(formdata, ACTION_ADD);
+ },
+ async delete(formdata) {
+ await TPS.HandleForms(formdata, ACTION_DELETE);
+ },
+ async verify(formdata) {
+ await TPS.HandleForms(formdata, ACTION_VERIFY);
+ },
+ async verifyNot(formdata) {
+ await TPS.HandleForms(formdata, ACTION_VERIFY_NOT);
+ },
+ },
+ History: {
+ async add(history) {
+ await TPS.HandleHistory(history, ACTION_ADD);
+ },
+ async delete(history) {
+ await TPS.HandleHistory(history, ACTION_DELETE);
+ },
+ async verify(history) {
+ await TPS.HandleHistory(history, ACTION_VERIFY);
+ },
+ async verifyNot(history) {
+ await TPS.HandleHistory(history, ACTION_VERIFY_NOT);
+ },
+ },
+ Passwords: {
+ async add(passwords) {
+ await TPS.HandlePasswords(passwords, ACTION_ADD);
+ },
+ async modify(passwords) {
+ await TPS.HandlePasswords(passwords, ACTION_MODIFY);
+ },
+ async delete(passwords) {
+ await TPS.HandlePasswords(passwords, ACTION_DELETE);
+ },
+ async verify(passwords) {
+ await TPS.HandlePasswords(passwords, ACTION_VERIFY);
+ },
+ async verifyNot(passwords) {
+ await TPS.HandlePasswords(passwords, ACTION_VERIFY_NOT);
+ },
+ skipValidation() {
+ TPS.shouldValidatePasswords = false;
+ },
+ },
+ Prefs: {
+ async modify(prefs) {
+ await TPS.HandlePrefs(prefs, ACTION_MODIFY);
+ },
+ async verify(prefs) {
+ await TPS.HandlePrefs(prefs, ACTION_VERIFY);
+ },
+ },
+ Tabs: {
+ async add(tabs) {
+ await TPS.HandleTabs(tabs, ACTION_ADD);
+ },
+ async verify(tabs) {
+ await TPS.HandleTabs(tabs, ACTION_VERIFY);
+ },
+ async verifyNot(tabs) {
+ await TPS.HandleTabs(tabs, ACTION_VERIFY_NOT);
+ },
+ },
+ Windows: {
+ async add(aWindow) {
+ await TPS.HandleWindows(aWindow, ACTION_ADD);
+ },
+ },
+
+ // Jumping through loads of hoops via calling back into a "HandleXXX" method
+ // and adding an ACTION_XXX indirection adds no value - let's KISS!
+ // eslint-disable-next-line no-unused-vars
+ ExtStorage: {
+ async set(id, data) {
+ lazy.Logger.logInfo(`setting data for '${id}': ${data}`);
+ await lazy.extensionStorageSync.set({ id }, data);
+ },
+ async verify(id, keys, data) {
+ let got = await lazy.extensionStorageSync.get({ id }, keys);
+ lazy.Logger.AssertEqual(got, data, `data for '${id}'/${keys}`);
+ },
+ },
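+
+ // Illustrative usage (extension id and payload hypothetical; passing null
+ // for `keys` asks the storage backend for all stored values):
+ //   Phase("phase1", [[ExtStorage.set, "ext@example.com", { k: "v" }],
+ //                    [ExtStorage.verify, "ext@example.com", null, { k: "v" }]]);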
+};
+
+// Initialize TPS
+TPS._init();
diff --git a/services/sync/tps/extensions/tps/schema.json b/services/sync/tps/extensions/tps/schema.json
new file mode 100644
index 0000000000..fe51488c70
--- /dev/null
+++ b/services/sync/tps/extensions/tps/schema.json
@@ -0,0 +1 @@
+[]