author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 01:47:29 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 01:47:29 +0000
commit     0ebf5bdf043a27fd3dfb7f92e0cb63d88954c44d (patch)
tree       a31f07c9bcca9d56ce61e9a1ffd30ef350d513aa /services/sync/modules
parent     Initial commit. (diff)
Adding upstream version 115.8.0esr. (upstream/115.8.0esr)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'services/sync/modules')
-rw-r--r--  services/sync/modules/SyncDisconnect.sys.mjs  240
-rw-r--r--  services/sync/modules/SyncedTabs.sys.mjs  332
-rw-r--r--  services/sync/modules/UIState.sys.mjs  287
-rw-r--r--  services/sync/modules/addonsreconciler.sys.mjs  584
-rw-r--r--  services/sync/modules/addonutils.sys.mjs  390
-rw-r--r--  services/sync/modules/bridged_engine.sys.mjs  499
-rw-r--r--  services/sync/modules/collection_validator.sys.mjs  267
-rw-r--r--  services/sync/modules/constants.sys.mjs  133
-rw-r--r--  services/sync/modules/doctor.sys.mjs  188
-rw-r--r--  services/sync/modules/engines.sys.mjs  2260
-rw-r--r--  services/sync/modules/engines/addons.sys.mjs  820
-rw-r--r--  services/sync/modules/engines/bookmarks.sys.mjs  953
-rw-r--r--  services/sync/modules/engines/clients.sys.mjs  1123
-rw-r--r--  services/sync/modules/engines/extension-storage.sys.mjs  303
-rw-r--r--  services/sync/modules/engines/forms.sys.mjs  298
-rw-r--r--  services/sync/modules/engines/history.sys.mjs  585
-rw-r--r--  services/sync/modules/engines/passwords.sys.mjs  565
-rw-r--r--  services/sync/modules/engines/prefs.sys.mjs  467
-rw-r--r--  services/sync/modules/engines/tabs.sys.mjs  624
-rw-r--r--  services/sync/modules/keys.sys.mjs  166
-rw-r--r--  services/sync/modules/main.sys.mjs  25
-rw-r--r--  services/sync/modules/policies.sys.mjs  1057
-rw-r--r--  services/sync/modules/record.sys.mjs  1335
-rw-r--r--  services/sync/modules/resource.sys.mjs  292
-rw-r--r--  services/sync/modules/service.sys.mjs  1630
-rw-r--r--  services/sync/modules/stages/declined.sys.mjs  78
-rw-r--r--  services/sync/modules/stages/enginesync.sys.mjs  400
-rw-r--r--  services/sync/modules/status.sys.mjs  135
-rw-r--r--  services/sync/modules/sync_auth.sys.mjs  656
-rw-r--r--  services/sync/modules/telemetry.sys.mjs  1313
-rw-r--r--  services/sync/modules/util.sys.mjs  783
31 files changed, 18788 insertions, 0 deletions
diff --git a/services/sync/modules/SyncDisconnect.sys.mjs b/services/sync/modules/SyncDisconnect.sys.mjs
new file mode 100644
index 0000000000..5829085084
--- /dev/null
+++ b/services/sync/modules/SyncDisconnect.sys.mjs
@@ -0,0 +1,240 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// This module provides a facility for disconnecting Sync and FxA, optionally
+// sanitizing profile data as part of the process.
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ AsyncShutdown: "resource://gre/modules/AsyncShutdown.sys.mjs",
+ Log: "resource://gre/modules/Log.sys.mjs",
+ Sanitizer: "resource:///modules/Sanitizer.sys.mjs",
+ Utils: "resource://services-sync/util.sys.mjs",
+ setTimeout: "resource://gre/modules/Timer.sys.mjs",
+});
+
+XPCOMUtils.defineLazyGetter(lazy, "fxAccounts", () => {
+ return ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+ ).getFxAccountsSingleton();
+});
+
+XPCOMUtils.defineLazyGetter(lazy, "FxAccountsCommon", function () {
+ return ChromeUtils.import("resource://gre/modules/FxAccountsCommon.js");
+});
+
+export const SyncDisconnectInternal = {
+ lockRetryInterval: 1000, // wait 1 second before trying for the lock again.
+ lockRetryCount: 120, // Try 120 times (==2 mins) before giving up in disgust.
+ promiseDisconnectFinished: null, // If we are sanitizing, a promise for completion.
+
+ // mocked by tests.
+ getWeave() {
+ return ChromeUtils.importESModule("resource://services-sync/main.sys.mjs")
+ .Weave;
+ },
+
+ // Returns a promise that resolves when we are not syncing, waiting until
+ // a current Sync completes if necessary. Resolves with true if we
+ // successfully waited, in which case the sync lock will have been taken to
+ // ensure future syncs don't start, or resolves with false if we gave up
+ // waiting for the sync to complete (in which case we didn't take a lock -
+ // but note that Sync probably remains locked in this case regardless.)
+ async promiseNotSyncing(abortController) {
+ let weave = this.getWeave();
+ let log = lazy.Log.repository.getLogger("Sync.Service");
+ // We might be syncing - poll for up to 2 minutes waiting for the lock.
+ // (2 minutes seems extreme, but should be very rare.)
+ return new Promise(resolve => {
+ abortController.signal.onabort = () => {
+ resolve(false);
+ };
+
+ let attempts = 0;
+ let checkLock = () => {
+ if (abortController.signal.aborted) {
+ // We've already resolved, so don't want a new timer to ever start.
+ return;
+ }
+ if (weave.Service.lock()) {
+ resolve(true);
+ return;
+ }
+ attempts += 1;
+ if (attempts >= this.lockRetryCount) {
+ log.error(
+ "Gave up waiting for the sync lock - going ahead with sanitize anyway"
+ );
+ resolve(false);
+ return;
+ }
+ log.debug("Waiting a couple of seconds to get the sync lock");
+ lazy.setTimeout(checkLock, this.lockRetryInterval);
+ };
+ checkLock();
+ });
+ },
+
+ // Sanitize Sync-related data.
+ async doSanitizeSyncData() {
+ let weave = this.getWeave();
+ // Get the sync logger - if stuff goes wrong it can be useful to have that
+ // recorded in the sync logs.
+ let log = lazy.Log.repository.getLogger("Sync.Service");
+ log.info("Starting santitize of Sync data");
+ try {
+ // We clobber data for all Sync engines that are enabled.
+ await weave.Service.promiseInitialized;
+ weave.Service.enabled = false;
+
+ log.info("starting actual sanitization");
+ for (let engine of weave.Service.engineManager.getAll()) {
+ if (engine.enabled) {
+ try {
+ log.info("Wiping engine", engine.name);
+ await engine.wipeClient();
+ } catch (ex) {
+ log.error("Failed to wipe engine", ex);
+ }
+ }
+ }
+ // Reset the pref which is used to show a warning when a different user
+ // signs in - this is no longer a concern now that we've removed the
+ // data from the profile.
+ Services.prefs.clearUserPref(lazy.FxAccountsCommon.PREF_LAST_FXA_USER);
+
+ log.info("Finished wiping sync data");
+ } catch (ex) {
+ log.error("Failed to sanitize Sync data", ex);
+ console.error("Failed to sanitize Sync data", ex);
+ }
+ try {
+ // ensure any logs we wrote are flushed to disk.
+ await weave.Service.errorHandler.resetFileLog();
+ } catch (ex) {
+ console.log("Failed to flush the Sync log", ex);
+ }
+ },
+
+ // Sanitize all Browser data.
+ async doSanitizeBrowserData() {
+ try {
+ // sanitize everything other than "open windows" (and we don't do that
+ // because it may confuse the user - they probably want to see
+ // about:prefs with the disconnection reflected).
+ let itemsToClear = Object.keys(lazy.Sanitizer.items).filter(
+ k => k != "openWindows"
+ );
+ await lazy.Sanitizer.sanitize(itemsToClear);
+ } catch (ex) {
+ console.error("Failed to sanitize other data", ex);
+ }
+ },
+
+ async doSyncAndAccountDisconnect(shouldUnlock) {
+ // We do a startOver of Sync first - if we do the account first we end
+ // up with Sync configured but FxA not configured, which causes the browser
+ // UI to briefly enter a "needs reauth" state.
+ let Weave = this.getWeave();
+ await Weave.Service.promiseInitialized;
+ await Weave.Service.startOver();
+ await lazy.fxAccounts.signOut();
+ // Sync may have been disabled if we sanitized, so re-enable it now or
+ // else the user will be unable to resync should they sign in before a
+ // restart.
+ Weave.Service.enabled = true;
+
+ // and finally, if we managed to get the lock before, we should unlock it
+ // now.
+ if (shouldUnlock) {
+ Weave.Service.unlock();
+ }
+ },
+
+ // Start the sanitization process. Returns a promise that resolves when
+ // the sanitize is complete, and an AbortController which can be used to
+ // abort the process of waiting for a sync to complete.
+ async _startDisconnect(abortController, sanitizeData = false) {
+ // This is a bit convoluted - we want to wait for a sync to finish before
+ // sanitizing, but want to abort that wait if the browser shuts down while
+ // we are waiting (in which case we'll charge ahead anyway).
+ // So we do this by using an AbortController and passing that to the
+ // function that waits for the sync lock - it will immediately resolve
+ // if the abort controller is aborted.
+ let log = lazy.Log.repository.getLogger("Sync.Service");
+
+ // If the master-password is locked then we will fail to fully sanitize,
+ // so prompt for that now. If canceled, we just abort now.
+ log.info("checking master-password state");
+ if (!lazy.Utils.ensureMPUnlocked()) {
+ log.warn(
+ "The master-password needs to be unlocked to fully disconnect from sync"
+ );
+ return;
+ }
+
+ log.info("waiting for any existing syncs to complete");
+ let locked = await this.promiseNotSyncing(abortController);
+
+ if (sanitizeData) {
+ await this.doSanitizeSyncData();
+
+ // We disconnect before sanitizing the browser data - in a worst-case
+ // scenario where the sanitize takes so long that even the shutdown
+ // blocker doesn't allow it to finish, we should still at least be in
+ // a disconnected state on the next startup.
+ log.info("disconnecting account");
+ await this.doSyncAndAccountDisconnect(locked);
+
+ await this.doSanitizeBrowserData();
+ } else {
+ log.info("disconnecting account");
+ await this.doSyncAndAccountDisconnect(locked);
+ }
+ },
+
+ async disconnect(sanitizeData) {
+ if (this.promiseDisconnectFinished) {
+ throw new Error("A disconnect is already in progress");
+ }
+ let abortController = new AbortController();
+ let promiseDisconnectFinished = this._startDisconnect(
+ abortController,
+ sanitizeData
+ );
+ this.promiseDisconnectFinished = promiseDisconnectFinished;
+ let shutdownBlocker = () => {
+ // oh dear - we are sanitizing (probably stuck waiting for a sync to
+ // complete) and the browser is shutting down. Let's avoid the wait
+ // for sync to complete and continue the process anyway.
+ abortController.abort();
+ return promiseDisconnectFinished;
+ };
+ lazy.AsyncShutdown.quitApplicationGranted.addBlocker(
+ "SyncDisconnect: removing requested data",
+ shutdownBlocker
+ );
+
+ // wait for it to finish - hopefully without the blocker being called.
+ await promiseDisconnectFinished;
+ this.promiseDisconnectFinished = null;
+
+ // sanitize worked so remove our blocker - it's a noop if the blocker
+ // did call us.
+ lazy.AsyncShutdown.quitApplicationGranted.removeBlocker(shutdownBlocker);
+ },
+};
+
+export const SyncDisconnect = {
+ get promiseDisconnectFinished() {
+ return SyncDisconnectInternal.promiseDisconnectFinished;
+ },
+
+ disconnect(sanitizeData) {
+ return SyncDisconnectInternal.disconnect(sanitizeData);
+ },
+};
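For orientation, here is a minimal sketch of how a privileged (chrome) caller might drive the module above. The module URL and the `disconnect(sanitizeData)` entry point come from the file itself; the function name and the choice of `true` are illustrative assumptions.

```js
// Hypothetical caller sketch - assumes a privileged (chrome) context.
const { SyncDisconnect } = ChromeUtils.importESModule(
  "resource://services-sync/SyncDisconnect.sys.mjs"
);

async function forgetThisDevice() {
  // true => also wipe Sync engine data and sanitize browser data;
  // false => just disconnect Sync and sign out of FxA.
  await SyncDisconnect.disconnect(true);
  // Note: a second call while one is in flight throws
  // "A disconnect is already in progress".
}
```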
diff --git a/services/sync/modules/SyncedTabs.sys.mjs b/services/sync/modules/SyncedTabs.sys.mjs
new file mode 100644
index 0000000000..9af388da8a
--- /dev/null
+++ b/services/sync/modules/SyncedTabs.sys.mjs
@@ -0,0 +1,332 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ CLIENT_NOT_CONFIGURED: "resource://services-sync/constants.sys.mjs",
+ Preferences: "resource://gre/modules/Preferences.sys.mjs",
+ Weave: "resource://services-sync/main.sys.mjs",
+});
+
+// The Sync XPCOM service
+XPCOMUtils.defineLazyGetter(lazy, "weaveXPCService", function () {
+ return Cc["@mozilla.org/weave/service;1"].getService(
+ Ci.nsISupports
+ ).wrappedJSObject;
+});
+
+// from MDN...
+function escapeRegExp(string) {
+ return string.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
+}
+
+// A topic we fire whenever we have new tabs available. This might be due
+// to a request made by this module to refresh the tab list, or as the result
+// of a regularly scheduled sync. The intent is that consumers just listen
+// for this notification and update their UI in response.
+const TOPIC_TABS_CHANGED = "services.sync.tabs.changed";
+
+// The interval, in seconds, within which we consider the existing list
+// of tabs "fresh enough" and don't force a new sync.
+const TABS_FRESH_ENOUGH_INTERVAL_SECONDS = 30;
+
+XPCOMUtils.defineLazyGetter(lazy, "log", function () {
+ let log = Log.repository.getLogger("Sync.RemoteTabs");
+ log.manageLevelFromPref("services.sync.log.logger.tabs");
+ return log;
+});
+
+// A private singleton that does the work.
+let SyncedTabsInternal = {
+ /* Make a "tab" record. Returns a promise */
+ async _makeTab(client, tab, url, showRemoteIcons) {
+ let icon;
+ if (showRemoteIcons) {
+ icon = tab.icon;
+ }
+ if (!icon) {
+ // By not specifying a size the favicon service will pick the default,
+ // that is usually set through setDefaultIconURIPreferredSize by the
+ // first browser window. Commonly it's 16px at current dpi.
+ icon = "page-icon:" + url;
+ }
+ return {
+ type: "tab",
+ title: tab.title || url,
+ url,
+ icon,
+ client: client.id,
+ lastUsed: tab.lastUsed,
+ };
+ },
+
+ /* Make a "client" record. Returns a promise for consistency with _makeTab */
+ async _makeClient(client) {
+ return {
+ id: client.id,
+ type: "client",
+ name: lazy.Weave.Service.clientsEngine.getClientName(client.id),
+ clientType: lazy.Weave.Service.clientsEngine.getClientType(client.id),
+ lastModified: client.lastModified * 1000, // sec to ms
+ tabs: [],
+ };
+ },
+
+ _tabMatchesFilter(tab, filter) {
+ let reFilter = new RegExp(escapeRegExp(filter), "i");
+ return reFilter.test(tab.url) || reFilter.test(tab.title);
+ },
+
+ _createRecentTabsList(clients, maxCount) {
+ let tabs = [];
+
+ for (let client of clients) {
+ for (let tab of client.tabs) {
+ tab.device = client.name;
+ tab.deviceType = client.clientType;
+ }
+ tabs = [...tabs, ...client.tabs.reverse()];
+ }
+ tabs = this._filterRecentTabsDupes(tabs);
+ tabs = tabs.sort((a, b) => b.lastUsed - a.lastUsed).slice(0, maxCount);
+ return tabs;
+ },
+
+ _filterRecentTabsDupes(tabs) {
+ // Filter out any tabs with duplicate URLs preserving
+ // the duplicate with the most recent lastUsed value
+ return tabs.filter(tab => {
+ return !tabs.some(t => {
+ return t.url === tab.url && tab.lastUsed < t.lastUsed;
+ });
+ });
+ },
+
+ async getTabClients(filter) {
+ lazy.log.info("Generating tab list with filter", filter);
+ let result = [];
+
+ // If Sync isn't ready, don't try and get anything.
+ if (!lazy.weaveXPCService.ready) {
+ lazy.log.debug("Sync isn't yet ready, so returning an empty tab list");
+ return result;
+ }
+
+ // A boolean that controls whether we should show the icon from the remote tab.
+ const showRemoteIcons = lazy.Preferences.get(
+ "services.sync.syncedTabs.showRemoteIcons",
+ true
+ );
+
+ let engine = lazy.Weave.Service.engineManager.get("tabs");
+
+ let ntabs = 0;
+ let clientTabList = await engine.getAllClients();
+ for (let client of clientTabList) {
+ if (!lazy.Weave.Service.clientsEngine.remoteClientExists(client.id)) {
+ continue;
+ }
+ let clientRepr = await this._makeClient(client);
+ lazy.log.debug("Processing client", clientRepr);
+
+ for (let tab of client.tabs) {
+ let url = tab.urlHistory[0];
+ lazy.log.trace("remote tab", url);
+
+ if (!url) {
+ continue;
+ }
+ let tabRepr = await this._makeTab(client, tab, url, showRemoteIcons);
+ if (filter && !this._tabMatchesFilter(tabRepr, filter)) {
+ continue;
+ }
+ clientRepr.tabs.push(tabRepr);
+ }
+ // We return all clients, even those without tabs - the consumer should
+ // filter it if they care.
+ ntabs += clientRepr.tabs.length;
+ result.push(clientRepr);
+ }
+ lazy.log.info(
+ `Final tab list has ${result.length} clients with ${ntabs} tabs.`
+ );
+ return result;
+ },
+
+ async syncTabs(force) {
+ if (!force) {
+ // Don't bother refetching tabs if we already did so recently
+ let lastFetch = lazy.Preferences.get("services.sync.lastTabFetch", 0);
+ let now = Math.floor(Date.now() / 1000);
+ if (now - lastFetch < TABS_FRESH_ENOUGH_INTERVAL_SECONDS) {
+ lazy.log.info("_refetchTabs was done recently, do not doing it again");
+ return false;
+ }
+ }
+
+ // If Sync isn't configured don't try and sync, else we will get reports
+ // of a login failure.
+ if (lazy.Weave.Status.checkSetup() === lazy.CLIENT_NOT_CONFIGURED) {
+ lazy.log.info(
+ "Sync client is not configured, so not attempting a tab sync"
+ );
+ return false;
+ }
+ // If the primary password is locked, we should not try to sync
+ if (lazy.Weave.Utils.mpLocked()) {
+ lazy.log.info(
+ "Can't sync tabs due to the primary password being locked",
+ lazy.Weave.Status.login
+ );
+ return false;
+ }
+ // Ask Sync to just do the tabs engine if it can.
+ try {
+ lazy.log.info("Doing a tab sync.");
+ await lazy.Weave.Service.sync({ why: "tabs", engines: ["tabs"] });
+ return true;
+ } catch (ex) {
+ lazy.log.error("Sync failed", ex);
+ throw ex;
+ }
+ },
+
+ observe(subject, topic, data) {
+ lazy.log.trace(`observed topic=${topic}, data=${data}, subject=${subject}`);
+ switch (topic) {
+ case "weave:engine:sync:finish":
+ if (data != "tabs") {
+ return;
+ }
+ // The tabs engine just finished syncing
+ // Set our lastTabFetch pref here so it tracks both explicit sync calls
+ // and normally scheduled ones.
+ lazy.Preferences.set(
+ "services.sync.lastTabFetch",
+ Math.floor(Date.now() / 1000)
+ );
+ Services.obs.notifyObservers(null, TOPIC_TABS_CHANGED);
+ break;
+ case "weave:service:start-over":
+ // start-over needs to notify so consumers find no tabs.
+ lazy.Preferences.reset("services.sync.lastTabFetch");
+ Services.obs.notifyObservers(null, TOPIC_TABS_CHANGED);
+ break;
+ case "nsPref:changed":
+ Services.obs.notifyObservers(null, TOPIC_TABS_CHANGED);
+ break;
+ default:
+ break;
+ }
+ },
+
+ // Returns true if Sync is configured to Sync tabs, false otherwise
+ get isConfiguredToSyncTabs() {
+ if (!lazy.weaveXPCService.ready) {
+ lazy.log.debug("Sync isn't yet ready; assuming tab engine is enabled");
+ return true;
+ }
+
+ let engine = lazy.Weave.Service.engineManager.get("tabs");
+ return engine && engine.enabled;
+ },
+
+ get hasSyncedThisSession() {
+ let engine = lazy.Weave.Service.engineManager.get("tabs");
+ return engine && engine.hasSyncedThisSession;
+ },
+};
+
+Services.obs.addObserver(SyncedTabsInternal, "weave:engine:sync:finish");
+Services.obs.addObserver(SyncedTabsInternal, "weave:service:start-over");
+// Observe the pref that indicates whether the tabs engine is enabled.
+// Changes will force consumers to re-evaluate the state of sync and update
+// accordingly.
+Services.prefs.addObserver("services.sync.engine.tabs", SyncedTabsInternal);
+
+// The public interface.
+export var SyncedTabs = {
+ // A mock-point for tests.
+ _internal: SyncedTabsInternal,
+
+ // We make the topic for the observer notification public.
+ TOPIC_TABS_CHANGED,
+
+ // Expose the interval used to determine if synced tabs data needs a new sync
+ TABS_FRESH_ENOUGH_INTERVAL_SECONDS,
+
+ // Returns true if Sync is configured to Sync tabs, false otherwise
+ get isConfiguredToSyncTabs() {
+ return this._internal.isConfiguredToSyncTabs;
+ },
+
+ // Returns true if a tab sync has completed once this session. If this
+ // returns false, then getting back no clients/tabs possibly just means we
+ // are waiting for that first sync to complete.
+ get hasSyncedThisSession() {
+ return this._internal.hasSyncedThisSession;
+ },
+
+ // Return a promise that resolves with an array of client records, each with
+ // a .tabs array. Note that part of the contract for this module is that the
+ // returned objects are not shared between invocations, so callers are free
+ // to mutate the returned objects (eg, sort, truncate) however they see fit.
+ getTabClients(query) {
+ return this._internal.getTabClients(query);
+ },
+
+ // Starts a background request to start syncing tabs. Returns a promise that
+ // resolves when the sync is complete, but there's no resolved value -
+ // callers should be listening for TOPIC_TABS_CHANGED.
+ // If |force| is true we always sync. If false, we skip the sync if the
+ // most recent sync was recent enough.
+ syncTabs(force) {
+ return this._internal.syncTabs(force);
+ },
+
+ sortTabClientsByLastUsed(clients) {
+ // First sort the list of tabs for each client. Note that
+ // this module promises that the objects it returns are never
+ // shared, so we are free to mutate those objects directly.
+ for (let client of clients) {
+ let tabs = client.tabs;
+ tabs.sort((a, b) => b.lastUsed - a.lastUsed);
+ }
+ // Now sort the clients - the clients are sorted in the order of the
+ // most recent tab for that client (ie, it is important the tabs for
+ // each client are already sorted.)
+ clients.sort((a, b) => {
+ if (!a.tabs.length) {
+ return 1; // b comes first.
+ }
+ if (!b.tabs.length) {
+ return -1; // a comes first.
+ }
+ return b.tabs[0].lastUsed - a.tabs[0].lastUsed;
+ });
+ },
+
+ recordSyncedTabsTelemetry(object, tabEvent, extraOptions) {
+ Services.telemetry.setEventRecordingEnabled("synced_tabs", true);
+ Services.telemetry.recordEvent(
+ "synced_tabs",
+ tabEvent,
+ object,
+ null,
+ extraOptions
+ );
+ },
+
+ // Get list of synced tabs across all devices/clients
+ // truncated by value of maxCount param, sorted by
+ // lastUsed value, and filtered for duplicate URLs
+ async getRecentTabs(maxCount) {
+ let clients = await this.getTabClients();
+ return this._internal._createRecentTabsList(clients, maxCount);
+ },
+};
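A sketch of the consumer pattern the module describes (listen for `TOPIC_TABS_CHANGED`, then re-query): the APIs are those exported by the file above, while the observer object and the tab count are illustrative.

```js
// Hypothetical consumer sketch - assumes a privileged (chrome) context.
const { SyncedTabs } = ChromeUtils.importESModule(
  "resource://services-sync/SyncedTabs.sys.mjs"
);

const tabsObserver = {
  async observe() {
    // Re-query the deduplicated, recency-sorted list on every change.
    const tabs = await SyncedTabs.getRecentTabs(10);
    console.log(`Have ${tabs.length} recent remote tabs`);
  },
};
Services.obs.addObserver(tabsObserver, SyncedTabs.TOPIC_TABS_CHANGED);

// Kick off a background sync; per the contract above, consumers react to
// TOPIC_TABS_CHANGED rather than to the resolved value.
SyncedTabs.syncTabs().catch(console.error);
```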
diff --git a/services/sync/modules/UIState.sys.mjs b/services/sync/modules/UIState.sys.mjs
new file mode 100644
index 0000000000..c169712523
--- /dev/null
+++ b/services/sync/modules/UIState.sys.mjs
@@ -0,0 +1,287 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * @typedef {Object} UIState
+ * @property {string} status The Sync/FxA status, see STATUS_* constants.
+ * @property {string} [email] The FxA email configured to log-in with Sync.
+ * @property {string} [displayName] The user's FxA display name.
+ * @property {string} [avatarURL] The user's FxA avatar URL.
+ * @property {Date} [lastSync] The last sync time.
+ * @property {boolean} [syncing] Whether or not we are currently syncing.
+ */
+
+const lazy = {};
+ChromeUtils.defineESModuleGetters(lazy, {
+ LOGIN_FAILED_LOGIN_REJECTED: "resource://services-sync/constants.sys.mjs",
+ Weave: "resource://services-sync/main.sys.mjs",
+});
+
+const TOPICS = [
+ "weave:connected",
+ "weave:service:login:got-hashed-id",
+ "weave:service:login:error",
+ "weave:service:ready",
+ "weave:service:sync:start",
+ "weave:service:sync:finish",
+ "weave:service:sync:error",
+ "weave:service:start-over:finish",
+ "fxaccounts:onverified",
+ "fxaccounts:onlogin", // Defined in FxAccountsCommon, pulling it is expensive.
+ "fxaccounts:onlogout",
+ "fxaccounts:profilechange",
+ "fxaccounts:statechange",
+];
+
+const ON_UPDATE = "sync-ui-state:update";
+
+const STATUS_NOT_CONFIGURED = "not_configured";
+const STATUS_LOGIN_FAILED = "login_failed";
+const STATUS_NOT_VERIFIED = "not_verified";
+const STATUS_SIGNED_IN = "signed_in";
+
+const DEFAULT_STATE = {
+ status: STATUS_NOT_CONFIGURED,
+};
+
+const UIStateInternal = {
+ _initialized: false,
+ _state: null,
+
+ // We keep _syncing out of the state object because we can only track it
+ // using sync events; we can't query it at an arbitrary point in time.
+ _syncing: false,
+
+ get state() {
+ if (!this._state) {
+ return DEFAULT_STATE;
+ }
+ return Object.assign({}, this._state, { syncing: this._syncing });
+ },
+
+ isReady() {
+ if (!this._initialized) {
+ this.init();
+ return false;
+ }
+ return true;
+ },
+
+ init() {
+ this._initialized = true;
+ // Because the FxA toolbar is usually visible, this module gets loaded at
+ // browser startup, and we want to avoid pulling in all of FxA or Sync at
+ // that time, so we refresh the state after the browser has settled.
+ Services.tm.idleDispatchToMainThread(() => {
+ this.refreshState().catch(e => {
+ console.error(e);
+ });
+ }, 2000);
+ },
+
+ // Used for testing.
+ reset() {
+ this._state = null;
+ this._syncing = false;
+ this._initialized = false;
+ },
+
+ observe(subject, topic, data) {
+ switch (topic) {
+ case "weave:service:sync:start":
+ this.toggleSyncActivity(true);
+ break;
+ case "weave:service:sync:finish":
+ case "weave:service:sync:error":
+ this.toggleSyncActivity(false);
+ break;
+ default:
+ this.refreshState().catch(e => {
+ console.error(e);
+ });
+ break;
+ }
+ },
+
+ // Builds a new state from scratch.
+ async refreshState() {
+ const newState = {};
+ await this._refreshFxAState(newState);
+ // Optimize the "not signed in" case to avoid refreshing twice just after
+ // startup - if there's currently no _state, and we still aren't configured,
+ // just early exit.
+ if (this._state == null && newState.status == DEFAULT_STATE.status) {
+ return this.state;
+ }
+ if (newState.syncEnabled) {
+ this._setLastSyncTime(newState); // We want this in case we change accounts.
+ }
+ this._state = newState;
+
+ this.notifyStateUpdated();
+ return this.state;
+ },
+
+ // Update the current state with the last sync time/currently syncing status.
+ toggleSyncActivity(syncing) {
+ this._syncing = syncing;
+ this._setLastSyncTime(this._state);
+
+ this.notifyStateUpdated();
+ },
+
+ notifyStateUpdated() {
+ Services.obs.notifyObservers(null, ON_UPDATE);
+ },
+
+ async _refreshFxAState(newState) {
+ let userData = await this._getUserData();
+ await this._populateWithUserData(newState, userData);
+ },
+
+ async _populateWithUserData(state, userData) {
+ let status;
+ let syncUserName = Services.prefs.getStringPref(
+ "services.sync.username",
+ ""
+ );
+ if (!userData) {
+ // If Sync thinks it is configured but there's no FxA user, then we
+ // want to enter the "login failed" state so the user can get
+ // reconfigured.
+ if (syncUserName) {
+ state.email = syncUserName;
+ status = STATUS_LOGIN_FAILED;
+ } else {
+ // everyone agrees nothing is configured.
+ status = STATUS_NOT_CONFIGURED;
+ }
+ } else {
+ let loginFailed = await this._loginFailed();
+ if (loginFailed) {
+ status = STATUS_LOGIN_FAILED;
+ } else if (!userData.verified) {
+ status = STATUS_NOT_VERIFIED;
+ } else {
+ status = STATUS_SIGNED_IN;
+ }
+ state.uid = userData.uid;
+ state.email = userData.email;
+ state.displayName = userData.displayName;
+ // for better or worse, this module renames these attributes.
+ state.avatarURL = userData.avatar;
+ state.avatarIsDefault = userData.avatarDefault;
+ state.syncEnabled = !!syncUserName;
+ }
+ state.status = status;
+ },
+
+ async _getUserData() {
+ try {
+ return await this.fxAccounts.getSignedInUser();
+ } catch (e) {
+ // This is most likely in tests, where we quickly log users in and out.
+ // The most likely scenario is a user logged out, so reflect that.
+ // Bug 995134 calls for better errors so we could retry if we were
+ // sure this was the failure reason.
+ console.error("Error updating FxA account info: " + e);
+ return null;
+ }
+ },
+
+ _setLastSyncTime(state) {
+ if (state?.status == UIState.STATUS_SIGNED_IN) {
+ const lastSync = Services.prefs.getCharPref(
+ "services.sync.lastSync",
+ null
+ );
+ state.lastSync = lastSync ? new Date(lastSync) : null;
+ }
+ },
+
+ async _loginFailed() {
+ // First ask FxA if it thinks the user needs re-authentication. In practice,
+ // this check is probably canonical (ie, we probably don't really need
+ // the check below at all as we drop local session info on the first sign
+ // of a problem) - but we keep it for now to keep the risk down.
+ let hasLocalSession = await this.fxAccounts.hasLocalSession();
+ if (!hasLocalSession) {
+ return true;
+ }
+
+ // Referencing Weave.Service will implicitly initialize sync, and we don't
+ // want to force that - so first check if it is ready.
+ let service = Cc["@mozilla.org/weave/service;1"].getService(
+ Ci.nsISupports
+ ).wrappedJSObject;
+ if (!service.ready) {
+ return false;
+ }
+ // LOGIN_FAILED_LOGIN_REJECTED explicitly means "you must log back in".
+ // All other login failures are assumed to be transient and should go
+ // away by themselves, so aren't reflected here.
+ return lazy.Weave.Status.login == lazy.LOGIN_FAILED_LOGIN_REJECTED;
+ },
+
+ set fxAccounts(mockFxAccounts) {
+ delete this.fxAccounts;
+ this.fxAccounts = mockFxAccounts;
+ },
+};
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+XPCOMUtils.defineLazyGetter(UIStateInternal, "fxAccounts", () => {
+ return ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+ ).getFxAccountsSingleton();
+});
+
+for (let topic of TOPICS) {
+ Services.obs.addObserver(UIStateInternal, topic);
+}
+
+export var UIState = {
+ _internal: UIStateInternal,
+
+ ON_UPDATE,
+
+ STATUS_NOT_CONFIGURED,
+ STATUS_LOGIN_FAILED,
+ STATUS_NOT_VERIFIED,
+ STATUS_SIGNED_IN,
+
+ /**
+ * Returns true if the module has been initialized and the state set.
+ * If not, return false and trigger an init in the background.
+ */
+ isReady() {
+ return this._internal.isReady();
+ },
+
+ /**
+ * @returns {UIState} The current Sync/FxA UI State.
+ */
+ get() {
+ return this._internal.state;
+ },
+
+ /**
+ * Refresh the state. Used for testing, don't call this directly since
+ * UIState already listens to Sync/FxA notifications to determine if the state
+ * needs to be refreshed. ON_UPDATE will be fired once the state is refreshed.
+ *
+ * @returns {Promise<UIState>} Resolved once the state is refreshed.
+ */
+ refresh() {
+ return this._internal.refreshState();
+ },
+
+ /**
+ * Reset the state of the whole module. Used for testing.
+ */
+ reset() {
+ this._internal.reset();
+ },
+};
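To illustrate the `isReady()`/`ON_UPDATE` handshake documented above, a minimal sketch of a hypothetical UI consumer; only the `UIState` API is taken from the file, the rest is assumed.

```js
// Hypothetical UI consumer sketch - assumes a privileged (chrome) context.
const { UIState } = ChromeUtils.importESModule(
  "resource://services-sync/UIState.sys.mjs"
);

function render() {
  // A false return triggers a background init; ON_UPDATE fires once the
  // state has actually been refreshed.
  if (!UIState.isReady()) {
    return;
  }
  const state = UIState.get();
  if (state.status == UIState.STATUS_SIGNED_IN) {
    console.log(`Signed in as ${state.email}, syncing=${state.syncing}`);
  }
}

Services.obs.addObserver({ observe: render }, UIState.ON_UPDATE);
render();
```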
diff --git a/services/sync/modules/addonsreconciler.sys.mjs b/services/sync/modules/addonsreconciler.sys.mjs
new file mode 100644
index 0000000000..902f57348e
--- /dev/null
+++ b/services/sync/modules/addonsreconciler.sys.mjs
@@ -0,0 +1,584 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * This file contains middleware to reconcile state of AddonManager for
+ * purposes of tracking events for Sync. The content in this file exists
+ * because AddonManager does not have a getChangesSinceX() API and adding
+ * that functionality properly was deemed too time-consuming at the time
+ * add-on sync was originally written. If/when AddonManager adds this API,
+ * this file can go away and the add-ons engine can be rewritten to use it.
+ *
+ * It was decided to have this tracking functionality exist in a separate
+ * standalone file so it could be more easily understood, tested, and
+ * hopefully ported.
+ */
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import { AddonManager } from "resource://gre/modules/AddonManager.sys.mjs";
+
+const DEFAULT_STATE_FILE = "addonsreconciler";
+
+export var CHANGE_INSTALLED = 1;
+export var CHANGE_UNINSTALLED = 2;
+export var CHANGE_ENABLED = 3;
+export var CHANGE_DISABLED = 4;
+
+/**
+ * Maintains state of add-ons.
+ *
+ * State is maintained in 2 data structures, an object mapping add-on IDs
+ * to metadata and an array of changes over time. The object mapping can be
+ * thought of as a minimal copy of data from AddonManager which is needed for
+ * Sync. The array is effectively a log of changes over time.
+ *
+ * The data structures are persisted to disk by serializing to a JSON file in
+ * the current profile. The data structures are updated by 2 mechanisms. First,
+ * they can be refreshed from the global state of the AddonManager. This is a
+ * sure-fire way of ensuring the reconciler is up to date. Second, the
+ * reconciler adds itself as an AddonManager listener. When it receives change
+ * notifications, it updates its internal state incrementally.
+ *
+ * The internal state is persisted to a JSON file in the profile directory.
+ *
+ * An instance of this is bound to an AddonsEngine instance. In reality, it
+ * likely exists as a singleton. To AddonsEngine, it functions as a store and
+ * an entity which emits events for tracking.
+ *
+ * The usage pattern for instances of this class is:
+ *
+ * let reconciler = new AddonsReconciler(...);
+ * await reconciler.ensureStateLoaded();
+ *
+ * // At this point, your instance should be ready to use.
+ *
+ * When you are finished with the instance, please call:
+ *
+ * reconciler.stopListening();
+ * await reconciler.saveState(...);
+ *
+ * This class uses the AddonManager AddonListener interface.
+ * When an add-on is installed, listeners are called in the following order:
+ * AL.onInstalling, AL.onInstalled
+ *
+ * For uninstalls, we see AL.onUninstalling then AL.onUninstalled.
+ *
+ * Enabling and disabling work by sending:
+ *
+ * AL.onEnabling, AL.onEnabled
+ * AL.onDisabling, AL.onDisabled
+ *
+ * Actions can be undone. All undoable actions notify the same
+ * AL.onOperationCancelled event. We treat this event like any other.
+ *
+ * When an add-on is uninstalled from about:addons, the user is offered an
+ * "Undo" option, which leads to the following sequence of events as
+ * observed by an AddonListener:
+ * Add-ons are first disabled then they are actually uninstalled. So, we will
+ * see AL.onDisabling and AL.onDisabled. The onUninstalling and onUninstalled
+ * events only come after the Addon Manager is closed or another view is
+ * switched to. In the case of Sync performing the uninstall, the uninstall
+ * events will occur immediately. However, we still see disabling events and
+ * heed them like they were normal. In the end, the state is proper.
+ */
+export function AddonsReconciler(queueCaller) {
+ this._log = Log.repository.getLogger("Sync.AddonsReconciler");
+ this._log.manageLevelFromPref("services.sync.log.logger.addonsreconciler");
+ this.queueCaller = queueCaller;
+
+ Svc.Obs.add("xpcom-shutdown", this.stopListening, this);
+}
+
+AddonsReconciler.prototype = {
+ /** Flag indicating whether we are listening to AddonManager events. */
+ _listening: false,
+
+ /**
+ * Define this as false if the reconciler should not persist state
+ * to disk when handling events.
+ *
+ * This allows test code to avoid spinning to write during observer
+ * notifications and xpcom shutdown, which appears to cause hangs on WinXP
+ * (Bug 873861).
+ */
+ _shouldPersist: true,
+
+ /** Log logger instance */
+ _log: null,
+
+ /**
+ * Container for add-on metadata.
+ *
+ * Keys are add-on IDs. Values are objects which describe the state of the
+ * add-on. This is a minimal mirror of data that can be queried from
+ * AddonManager. In some cases, we retain data longer than AddonManager.
+ */
+ _addons: {},
+
+ /**
+ * List of add-on changes over time.
+ *
+ * Each element is an array of [time, change, id].
+ */
+ _changes: [],
+
+ /**
+ * Objects subscribed to changes made to this instance.
+ */
+ _listeners: [],
+
+ /**
+ * Accessor for add-ons in this object.
+ *
+ * Returns an object mapping add-on IDs to objects containing metadata.
+ */
+ get addons() {
+ return this._addons;
+ },
+
+ async ensureStateLoaded() {
+ if (!this._promiseStateLoaded) {
+ this._promiseStateLoaded = this.loadState();
+ }
+ return this._promiseStateLoaded;
+ },
+
+ /**
+ * Load reconciler state from a file.
+ *
+ * The path is relative to the weave directory in the profile. If no
+ * path is given, the default one is used.
+ *
+ * If the file does not exist or there was an error parsing the file, the
+ * state will be transparently defined as empty.
+ *
+ * @param file
+ * Path to load. ".json" is appended automatically. If not defined,
+ * a default path will be consulted.
+ */
+ async loadState(file = DEFAULT_STATE_FILE) {
+ let json = await Utils.jsonLoad(file, this);
+ this._addons = {};
+ this._changes = [];
+
+ if (!json) {
+ this._log.debug("No data seen in loaded file: " + file);
+ return false;
+ }
+
+ let version = json.version;
+ if (!version || version != 1) {
+ this._log.error(
+ "Could not load JSON file because version not " +
+ "supported: " +
+ version
+ );
+ return false;
+ }
+
+ this._addons = json.addons;
+ for (let id in this._addons) {
+ let record = this._addons[id];
+ record.modified = new Date(record.modified);
+ }
+
+ for (let [time, change, id] of json.changes) {
+ this._changes.push([new Date(time), change, id]);
+ }
+
+ return true;
+ },
+
+ /**
+ * Saves the current state to a file in the local profile.
+ *
+ * @param file
+ * String path in profile to save to. If not defined, the default
+ * will be used.
+ */
+ async saveState(file = DEFAULT_STATE_FILE) {
+ let state = { version: 1, addons: {}, changes: [] };
+
+ for (let [id, record] of Object.entries(this._addons)) {
+ state.addons[id] = {};
+ for (let [k, v] of Object.entries(record)) {
+ if (k == "modified") {
+ state.addons[id][k] = v.getTime();
+ } else {
+ state.addons[id][k] = v;
+ }
+ }
+ }
+
+ for (let [time, change, id] of this._changes) {
+ state.changes.push([time.getTime(), change, id]);
+ }
+
+ this._log.info("Saving reconciler state to file: " + file);
+ await Utils.jsonSave(file, this, state);
+ },
+
+ /**
+ * Registers a change listener with this instance.
+ *
+ * Change listeners are called every time a change is recorded. The listener
+ * is an object with the function "changeListener" that takes 3 arguments,
+ * the Date at which the change happened, the type of change (a CHANGE_*
+ * constant), and the add-on state object reflecting the current state of
+ * the add-on at the time of the change.
+ *
+ * @param listener
+ * Object containing changeListener function.
+ */
+ addChangeListener: function addChangeListener(listener) {
+ if (!this._listeners.includes(listener)) {
+ this._log.debug("Adding change listener.");
+ this._listeners.push(listener);
+ }
+ },
+
+ /**
+ * Removes a previously-installed change listener from the instance.
+ *
+ * @param listener
+ * Listener instance to remove.
+ */
+ removeChangeListener: function removeChangeListener(listener) {
+ this._listeners = this._listeners.filter(element => {
+ if (element == listener) {
+ this._log.debug("Removing change listener.");
+ return false;
+ }
+ return true;
+ });
+ },
+
+ /**
+ * Tells the instance to start listening for AddonManager changes.
+ *
+ * This is typically called automatically when Sync is loaded.
+ */
+ startListening: function startListening() {
+ if (this._listening) {
+ return;
+ }
+
+ this._log.info("Registering as Add-on Manager listener.");
+ AddonManager.addAddonListener(this);
+ this._listening = true;
+ },
+
+ /**
+ * Tells the instance to stop listening for AddonManager changes.
+ *
+ * The reconciler should always be listening. This should only be called when
+ * the instance is being destroyed.
+ *
+ * This function will get called automatically on XPCOM shutdown. However, it
+ * is a best practice to call it yourself.
+ */
+ stopListening: function stopListening() {
+ if (!this._listening) {
+ return;
+ }
+
+ this._log.debug("Stopping listening and removing AddonManager listener.");
+ AddonManager.removeAddonListener(this);
+ this._listening = false;
+ },
+
+ /**
+ * Refreshes the global state of add-ons by querying the AddonManager.
+ */
+ async refreshGlobalState() {
+ this._log.info("Refreshing global state from AddonManager.");
+
+ let installs;
+ let addons = await AddonManager.getAllAddons();
+
+ let ids = {};
+
+ for (let addon of addons) {
+ ids[addon.id] = true;
+ await this.rectifyStateFromAddon(addon);
+ }
+
+ // Look for locally-defined add-ons that no longer exist and update their
+ // record.
+ for (let [id, addon] of Object.entries(this._addons)) {
+ if (id in ids) {
+ continue;
+ }
+
+ // If the id isn't in ids, it means that the add-on has been deleted or
+ // the add-on is in the process of being installed. We detect the
+ // latter by seeing if an AddonInstall is found for this add-on.
+
+ if (!installs) {
+ installs = await AddonManager.getAllInstalls();
+ }
+
+ let installFound = false;
+ for (let install of installs) {
+ if (
+ install.addon &&
+ install.addon.id == id &&
+ install.state == AddonManager.STATE_INSTALLED
+ ) {
+ installFound = true;
+ break;
+ }
+ }
+
+ if (installFound) {
+ continue;
+ }
+
+ if (addon.installed) {
+ addon.installed = false;
+ this._log.debug(
+ "Adding change because add-on not present in " +
+ "Add-on Manager: " +
+ id
+ );
+ await this._addChange(new Date(), CHANGE_UNINSTALLED, addon);
+ }
+ }
+
+ // See note for _shouldPersist.
+ if (this._shouldPersist) {
+ await this.saveState();
+ }
+ },
+
+ /**
+ * Rectifies the state of an add-on from an Addon instance.
+ *
+ * This basically says "given an Addon instance, assume it is truth and
+ * apply changes to the local state to reflect it."
+ *
+ * This function could result in change listeners being called if the local
+ * state differs from the passed add-on's state.
+ *
+ * @param addon
+ * Addon instance being updated.
+ */
+ async rectifyStateFromAddon(addon) {
+ this._log.debug(
+ `Rectifying state for addon ${addon.name} (version=${addon.version}, id=${addon.id})`
+ );
+
+ let id = addon.id;
+ let enabled = !addon.userDisabled;
+ let guid = addon.syncGUID;
+ let now = new Date();
+
+ if (!(id in this._addons)) {
+ let record = {
+ id,
+ guid,
+ enabled,
+ installed: true,
+ modified: now,
+ type: addon.type,
+ scope: addon.scope,
+ foreignInstall: addon.foreignInstall,
+ isSyncable: addon.isSyncable,
+ };
+ this._addons[id] = record;
+ this._log.debug(
+ "Adding change because add-on not present locally: " + id
+ );
+ await this._addChange(now, CHANGE_INSTALLED, record);
+ return;
+ }
+
+ let record = this._addons[id];
+ record.isSyncable = addon.isSyncable;
+
+ if (!record.installed) {
+ // It is possible the record is marked as uninstalled because an
+ // uninstall is pending.
+ if (!(addon.pendingOperations & AddonManager.PENDING_UNINSTALL)) {
+ record.installed = true;
+ record.modified = now;
+ }
+ }
+
+ if (record.enabled != enabled) {
+ record.enabled = enabled;
+ record.modified = now;
+ let change = enabled ? CHANGE_ENABLED : CHANGE_DISABLED;
+ this._log.debug("Adding change because enabled state changed: " + id);
+ await this._addChange(new Date(), change, record);
+ }
+
+ if (record.guid != guid) {
+ record.guid = guid;
+ // We don't record a change because the Sync engine rectifies this on its
+ // own. This is tightly coupled with Sync. If this code is ever lifted
+ // outside of Sync, this exception should likely be removed.
+ }
+ },
+
+ /**
+ * Record a change in add-on state.
+ *
+ * @param date
+ * Date at which the change occurred.
+ * @param change
+ * The type of the change. A CHANGE_* constant.
+ * @param state
+ * The new state of the add-on. From this.addons.
+ */
+ async _addChange(date, change, state) {
+ this._log.info("Change recorded for " + state.id);
+ this._changes.push([date, change, state.id]);
+
+ for (let listener of this._listeners) {
+ try {
+ await listener.changeListener(date, change, state);
+ } catch (ex) {
+ this._log.error("Exception calling change listener", ex);
+ }
+ }
+ },
+
+ /**
+ * Obtain the set of changes to add-ons since the date passed.
+ *
+ * This will return an array of arrays. Each entry in the array has the
+ * elements [date, change_type, id], where
+ *
+ * date - Date instance representing when the change occurred.
+ * change_type - One of CHANGE_* constants.
+ * id - ID of add-on that changed.
+ */
+ getChangesSinceDate(date) {
+ let length = this._changes.length;
+ for (let i = 0; i < length; i++) {
+ if (this._changes[i][0] >= date) {
+ return this._changes.slice(i);
+ }
+ }
+
+ return [];
+ },
+
+ /**
+ * Prunes all recorded changes from before the specified Date.
+ *
+ * @param date
+ * Entries older than this Date will be removed.
+ */
+ pruneChangesBeforeDate(date) {
+ this._changes = this._changes.filter(function test_age(change) {
+ return change[0] >= date;
+ });
+ },
+
+ /**
+ * Obtains the set of all known Sync GUIDs for add-ons.
+ */
+ getAllSyncGUIDs() {
+ let result = {};
+ for (let id in this.addons) {
+ result[id] = true;
+ }
+
+ return result;
+ },
+
+ /**
+ * Obtain the add-on state record for an add-on by Sync GUID.
+ *
+ * If the add-on could not be found, returns null.
+ *
+ * @param guid
+ * Sync GUID of add-on to retrieve.
+ */
+ getAddonStateFromSyncGUID(guid) {
+ for (let id in this.addons) {
+ let addon = this.addons[id];
+ if (addon.guid == guid) {
+ return addon;
+ }
+ }
+
+ return null;
+ },
+
+ /**
+ * Handler that is invoked as part of the AddonManager listeners.
+ */
+ async _handleListener(action, addon) {
+ // Since this is called as an observer, we explicitly trap errors and
+ // log them to ourselves so we don't see errors reported elsewhere.
+ try {
+ let id = addon.id;
+ this._log.debug("Add-on change: " + action + " to " + id);
+
+ switch (action) {
+ case "onEnabled":
+ case "onDisabled":
+ case "onInstalled":
+ case "onInstallEnded":
+ case "onOperationCancelled":
+ await this.rectifyStateFromAddon(addon);
+ break;
+
+ case "onUninstalled":
+ let id = addon.id;
+ let addons = this.addons;
+ if (id in addons) {
+ let now = new Date();
+ let record = addons[id];
+ record.installed = false;
+ record.modified = now;
+ this._log.debug(
+ "Adding change because of uninstall listener: " + id
+ );
+ await this._addChange(now, CHANGE_UNINSTALLED, record);
+ }
+ }
+
+ // See note for _shouldPersist.
+ if (this._shouldPersist) {
+ await this.saveState();
+ }
+ } catch (ex) {
+ this._log.warn("Exception", ex);
+ }
+ },
+
+ // AddonListeners
+ onEnabled: function onEnabled(addon) {
+ this.queueCaller.enqueueCall(() =>
+ this._handleListener("onEnabled", addon)
+ );
+ },
+ onDisabled: function onDisabled(addon) {
+ this.queueCaller.enqueueCall(() =>
+ this._handleListener("onDisabled", addon)
+ );
+ },
+ onInstalled: function onInstalled(addon) {
+ this.queueCaller.enqueueCall(() =>
+ this._handleListener("onInstalled", addon)
+ );
+ },
+ onUninstalled: function onUninstalled(addon) {
+ this.queueCaller.enqueueCall(() =>
+ this._handleListener("onUninstalled", addon)
+ );
+ },
+ onOperationCancelled: function onOperationCancelled(addon) {
+ this.queueCaller.enqueueCall(() =>
+ this._handleListener("onOperationCancelled", addon)
+ );
+ },
+};
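The class doc comment above spells out a usage pattern; here it is as a runnable sketch. The `queue` stand-in is an assumption: as far as the listener callbacks are concerned, the reconciler only requires an object with `enqueueCall(fn)`.

```js
// Hypothetical sketch of the documented usage pattern.
const { AddonsReconciler, CHANGE_INSTALLED } = ChromeUtils.importESModule(
  "resource://services-sync/addonsreconciler.sys.mjs"
);

async function trackAddonChanges() {
  // Stand-in queue caller; the listener callbacks only use enqueueCall(fn).
  const queue = { enqueueCall: fn => fn() };
  const reconciler = new AddonsReconciler(queue);
  await reconciler.ensureStateLoaded();

  reconciler.addChangeListener({
    async changeListener(date, change, state) {
      if (change == CHANGE_INSTALLED) {
        console.log(`${state.id} installed at ${date}`);
      }
    },
  });
  reconciler.startListening();
  // On teardown: reconciler.stopListening(); await reconciler.saveState();
  return reconciler;
}
```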
diff --git a/services/sync/modules/addonutils.sys.mjs b/services/sync/modules/addonutils.sys.mjs
new file mode 100644
index 0000000000..46167611e1
--- /dev/null
+++ b/services/sync/modules/addonutils.sys.mjs
@@ -0,0 +1,390 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Svc } from "resource://services-sync/util.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ AddonManager: "resource://gre/modules/AddonManager.sys.mjs",
+ AddonRepository: "resource://gre/modules/addons/AddonRepository.sys.mjs",
+});
+
+function AddonUtilsInternal() {
+ this._log = Log.repository.getLogger("Sync.AddonUtils");
+ this._log.level = Log.Level[Svc.Prefs.get("log.logger.addonutils")];
+}
+AddonUtilsInternal.prototype = {
+ /**
+ * Obtain an AddonInstall object from an AddonSearchResult instance.
+ *
+ * The returned promise will be an AddonInstall on success or null (failure or
+ * addon not found)
+ *
+ * @param addon
+ * AddonSearchResult to obtain install from.
+ */
+ getInstallFromSearchResult(addon) {
+ this._log.debug("Obtaining install for " + addon.id);
+
+ // We should theoretically be able to obtain (and use) addon.install if
+ // it is available. However, the addon.sourceURI rewriting won't be
+ // reflected in the AddonInstall, so we can't use it. If we ever get rid
+ // of sourceURI rewriting, we can avoid having to reconstruct the
+ // AddonInstall.
+ return lazy.AddonManager.getInstallForURL(addon.sourceURI.spec, {
+ name: addon.name,
+ icons: addon.iconURL,
+ version: addon.version,
+ telemetryInfo: { source: "sync" },
+ });
+ },
+
+ /**
+ * Installs an add-on from an AddonSearchResult instance.
+ *
+ * The options argument defines extra options to control the install.
+ * Recognized keys in this map are:
+ *
+ * syncGUID - Sync GUID to use for the new add-on.
+ * enabled - Boolean indicating whether the add-on should be enabled upon
+ * install.
+ *
+ * The result object has the following keys:
+ *
+ * id ID of add-on that was installed.
+ * install AddonInstall that was installed.
+ * addon Addon that was installed.
+ *
+ * @param addon
+ * AddonSearchResult to install add-on from.
+ * @param options
+ * Object with additional metadata describing how to install add-on.
+ */
+ async installAddonFromSearchResult(addon, options) {
+ this._log.info("Trying to install add-on from search result: " + addon.id);
+
+ const install = await this.getInstallFromSearchResult(addon);
+ if (!install) {
+ throw new Error("AddonInstall not available: " + addon.id);
+ }
+
+ try {
+ this._log.info("Installing " + addon.id);
+ let log = this._log;
+
+ return new Promise((res, rej) => {
+ let listener = {
+ onInstallStarted: function onInstallStarted(install) {
+ if (!options) {
+ return;
+ }
+
+ if (options.syncGUID) {
+ log.info(
+ "Setting syncGUID of " + install.name + ": " + options.syncGUID
+ );
+ install.addon.syncGUID = options.syncGUID;
+ }
+
+ // We only need to change userDisabled if it is disabled because
+ // enabled is the default.
+ if ("enabled" in options && !options.enabled) {
+ log.info(
+ "Marking add-on as disabled for install: " + install.name
+ );
+ install.addon.disable();
+ }
+ },
+ onInstallEnded(install, addon) {
+ install.removeListener(listener);
+
+ res({ id: addon.id, install, addon });
+ },
+ onInstallFailed(install) {
+ install.removeListener(listener);
+
+ rej(new Error("Install failed: " + install.error));
+ },
+ onDownloadFailed(install) {
+ install.removeListener(listener);
+
+ rej(new Error("Download failed: " + install.error));
+ },
+ };
+ install.addListener(listener);
+ install.install();
+ });
+ } catch (ex) {
+ this._log.error("Error installing add-on", ex);
+ throw ex;
+ }
+ },
+
+ /**
+ * Uninstalls the addon instance.
+ *
+ * @param addon
+ * Addon instance to uninstall.
+ */
+ async uninstallAddon(addon) {
+ return new Promise(res => {
+ let listener = {
+ onUninstalling(uninstalling, needsRestart) {
+ if (addon.id != uninstalling.id) {
+ return;
+ }
+
+ // We assume restartless add-ons will send the onUninstalled event
+ // soon.
+ if (!needsRestart) {
+ return;
+ }
+
+ // For non-restartless add-ons, we issue the callback on uninstalling
+ // because we will likely never see the uninstalled event.
+ lazy.AddonManager.removeAddonListener(listener);
+ res(addon);
+ },
+ onUninstalled(uninstalled) {
+ if (addon.id != uninstalled.id) {
+ return;
+ }
+
+ lazy.AddonManager.removeAddonListener(listener);
+ res(addon);
+ },
+ };
+ lazy.AddonManager.addAddonListener(listener);
+ addon.uninstall();
+ });
+ },
+
+ /**
+ * Installs multiple add-ons specified by metadata.
+ *
+ * The first argument is an array of objects. Each object must have the
+ * following keys:
+ *
+ * id - public ID of the add-on to install.
+ * syncGUID - syncGUID for new add-on.
+ * enabled - boolean indicating whether the add-on should be enabled.
+ * requireSecureURI - Boolean indicating whether to require a secure
+ * URI when installing from a remote location. This defaults to
+ * true.
+ *
+ * The returned promise resolves when activity on all add-ons is complete,
+ * with an object describing the overall execution state. It contains the
+ * following keys:
+ *
+ *   installedIDs  Array of add-on IDs that were installed.
+ *   installs      Array of AddonInstall instances that were installed.
+ *   addons        Array of Addon instances that were installed.
+ *   skipped       Array of add-on IDs that were skipped.
+ *   errors        Array of errors encountered.
+ *
+ * If one or more installs fail, the promise rejects after all installs
+ * have settled.
+ *
+ * @param installs
+ * Array of objects describing add-ons to install.
+ */
+ async installAddons(installs) {
+ let ids = [];
+ for (let addon of installs) {
+ ids.push(addon.id);
+ }
+
+ let addons = await lazy.AddonRepository.getAddonsByIDs(ids);
+ this._log.info(
+ `Found ${addons.length} / ${ids.length}` +
+ " add-ons during repository search."
+ );
+
+ let ourResult = {
+ installedIDs: [],
+ installs: [],
+ addons: [],
+ skipped: [],
+ errors: [],
+ };
+
+ let toInstall = [];
+
+ // Rewrite the "src" query string parameter of the source URI to note
+ // that the add-on was installed by Sync and not something else so
+ // server-side metrics aren't skewed (bug 708134). The server should
+ // ideally send proper URLs, but this solution was deemed too
+ // complicated at the time the functionality was implemented.
+ for (let addon of addons) {
+ // Find the specified options for this addon.
+ let options;
+ for (let install of installs) {
+ if (install.id == addon.id) {
+ options = install;
+ break;
+ }
+ }
+ if (!this.canInstallAddon(addon, options)) {
+ ourResult.skipped.push(addon.id);
+ continue;
+ }
+
+ // We can go ahead and attempt to install it.
+ toInstall.push(addon);
+
+ // We should always be able to QI the nsIURI to nsIURL. If not, we
+ // still try to install the add-on, but we don't rewrite the URL,
+ // potentially skewing metrics.
+ try {
+ addon.sourceURI.QueryInterface(Ci.nsIURL);
+ } catch (ex) {
+ this._log.warn(
+ "Unable to QI sourceURI to nsIURL: " + addon.sourceURI.spec
+ );
+ continue;
+ }
+
+ let params = addon.sourceURI.query
+ .split("&")
+ .map(function rewrite(param) {
+ if (param.indexOf("src=") == 0) {
+ return "src=sync";
+ }
+ return param;
+ });
+
+ addon.sourceURI = addon.sourceURI
+ .mutate()
+ .setQuery(params.join("&"))
+ .finalize();
+ }
+
+ if (!toInstall.length) {
+ return ourResult;
+ }
+
+ const installPromises = [];
+ // Start all the installs asynchronously. They will report back to us
+ // as they finish, eventually triggering the global callback.
+ for (let addon of toInstall) {
+ let options = {};
+ for (let install of installs) {
+ if (install.id == addon.id) {
+ options = install;
+ break;
+ }
+ }
+
+ installPromises.push(
+ (async () => {
+ try {
+ const result = await this.installAddonFromSearchResult(
+ addon,
+ options
+ );
+ ourResult.installedIDs.push(result.id);
+ ourResult.installs.push(result.install);
+ ourResult.addons.push(result.addon);
+ } catch (error) {
+ ourResult.errors.push(error);
+ }
+ })()
+ );
+ }
+
+ await Promise.all(installPromises);
+
+ if (ourResult.errors.length) {
+ throw new Error("1 or more add-ons failed to install");
+ }
+ return ourResult;
+ },
+
+ /**
+ * Returns true if we are able to install the specified addon, false
+ * otherwise. It is expected that this will log the reason if it returns
+ * false.
+ *
+ * @param addon
+ * (Addon) Add-on instance to check.
+ * @param options
+ * (object) The options specified for this addon. See installAddons()
+ * for the valid elements.
+ */
+ canInstallAddon(addon, options) {
+ // sourceURI presence isn't enforced by AddonRepository. So, we skip
+ // add-ons without a sourceURI.
+ if (!addon.sourceURI) {
+ this._log.info(
+ "Skipping install of add-on because missing sourceURI: " + addon.id
+ );
+ return false;
+ }
+ // Verify that the source URI uses TLS. We don't allow installs from
+ // insecure sources for security reasons. The Addon Manager ensures
+ // that cert validation etc is performed.
+ // (We should also consider just dropping this entirely and calling
+ // XPIProvider.isInstallAllowed, but that has additional semantics we might
+ // need to think through...)
+ let requireSecureURI = true;
+ if (options && options.requireSecureURI !== undefined) {
+ requireSecureURI = options.requireSecureURI;
+ }
+
+ if (requireSecureURI) {
+ let scheme = addon.sourceURI.scheme;
+ if (scheme != "https") {
+ this._log.info(
+ `Skipping install of add-on "${addon.id}" because sourceURI's scheme of "${scheme}" is not trusted`
+ );
+ return false;
+ }
+ }
+
+ // Policy prevents either installing this addon or any addon
+ if (
+ Services.policies &&
+ (!Services.policies.mayInstallAddon(addon) ||
+ !Services.policies.isAllowed("xpinstall"))
+ ) {
+ this._log.info(
+ `Skipping install of "${addon.id}" due to enterprise policy`
+ );
+ return false;
+ }
+
+ this._log.info(`Add-on "${addon.id}" is able to be installed`);
+ return true;
+ },
+
+ /**
+ * Update the user disabled flag for an add-on.
+ *
+ * If the new flag matches the existing value, the function will return
+ * immediately.
+ *
+ * @param addon
+ * (Addon) Add-on instance to operate on.
+ * @param value
+ * (bool) New value for add-on's userDisabled property.
+ */
+ updateUserDisabled(addon, value) {
+ if (addon.userDisabled == value) {
+ return;
+ }
+
+ this._log.info("Updating userDisabled flag: " + addon.id + " -> " + value);
+ if (value) {
+ addon.disable();
+ } else {
+ addon.enable();
+ }
+ },
+};
+
+export const AddonUtils = new AddonUtilsInternal();
diff --git a/services/sync/modules/bridged_engine.sys.mjs b/services/sync/modules/bridged_engine.sys.mjs
new file mode 100644
index 0000000000..45e5f685cd
--- /dev/null
+++ b/services/sync/modules/bridged_engine.sys.mjs
@@ -0,0 +1,499 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * This file has all the machinery for hooking up bridged engines implemented
+ * in Rust. It's the JavaScript side of the Golden Gate bridge that connects
+ * Desktop Sync to a Rust `BridgedEngine`, via the `mozIBridgedSyncEngine`
+ * XPCOM interface.
+ *
+ * Creating a bridged engine only takes a few lines of code, since most of the
+ * hard work is done on the Rust side. On the JS side, you'll need to subclass
+ * `BridgedEngine` (instead of `SyncEngine`), supply a `mozIBridgedSyncEngine`
+ * for your subclass to wrap, and optionally implement and override the tracker.
+ */
+
+import { SyncEngine, Tracker } from "resource://services-sync/engines.sys.mjs";
+import { RawCryptoWrapper } from "resource://services-sync/record.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ Log: "resource://gre/modules/Log.sys.mjs",
+ PlacesUtils: "resource://gre/modules/PlacesUtils.sys.mjs",
+});
+
+/**
+ * A stub store that converts between raw decrypted incoming records and
+ * envelopes. Since the interface we need is so minimal, this class doesn't
+ * inherit from the base `Store` implementation...it would take more code to
+ * override all those behaviors!
+ *
+ * This class isn't meant to be subclassed, because bridged engines shouldn't
+ * override their store classes in `_storeObj`.
+ */
+class BridgedStore {
+ constructor(name, engine) {
+ if (!engine) {
+ throw new Error("Store must be associated with an Engine instance.");
+ }
+ this.engine = engine;
+ this._log = lazy.Log.repository.getLogger(`Sync.Engine.${name}.Store`);
+ this._batchChunkSize = 500;
+ }
+
+ async applyIncomingBatch(records, countTelemetry) {
+ for (let chunk of lazy.PlacesUtils.chunkArray(
+ records,
+ this._batchChunkSize
+ )) {
+ let incomingEnvelopesAsJSON = chunk.map(record =>
+ JSON.stringify(record.toIncomingBso())
+ );
+ this._log.trace("incoming envelopes", incomingEnvelopesAsJSON);
+ await this.engine._bridge.storeIncoming(incomingEnvelopesAsJSON);
+ }
+ // Array of failed records.
+ return [];
+ }
+
+ async wipe() {
+ await this.engine._bridge.wipe();
+ }
+}
+
+/**
+ * A wrapper class to convert between BSOs on the JS side, and envelopes on the
+ * Rust side. This class intentionally subclasses `RawCryptoWrapper`, because we
+ * don't want the stringification and parsing machinery in `CryptoWrapper`.
+ *
+ * This class isn't meant to be subclassed, because bridged engines shouldn't
+ * override their record classes in `_recordObj`.
+ */
+class BridgedRecord extends RawCryptoWrapper {
+ /**
+ * Creates an outgoing record from a BSO returned by a bridged engine.
+ *
+ * @param {String} collection The collection name.
+ * @param {Object} bso The outgoing bso (ie, a sync15::bso::OutgoingBso) returned from
+ * `mozIBridgedSyncEngine::apply`.
+ * @return {BridgedRecord} A Sync record ready to encrypt and upload.
+ */
+ static fromOutgoingBso(collection, bso) {
+ // The BSO has already been JSON serialized coming out of Rust, so the
+ // envelope has been flattened.
+ if (typeof bso.id != "string") {
+ throw new TypeError("Outgoing BSO missing ID");
+ }
+ if (typeof bso.payload != "string") {
+ throw new TypeError("Outgoing BSO missing payload");
+ }
+ let record = new BridgedRecord(collection, bso.id);
+ record.cleartext = bso.payload;
+ return record;
+ }
+
+ transformBeforeEncrypt(cleartext) {
+ if (typeof cleartext != "string") {
+ throw new TypeError("Outgoing bridged engine records must be strings");
+ }
+ return cleartext;
+ }
+
+ transformAfterDecrypt(cleartext) {
+ if (typeof cleartext != "string") {
+ throw new TypeError("Incoming bridged engine records must be strings");
+ }
+ return cleartext;
+ }
+
+ /**
+ * Converts this incoming record into an envelope to pass to a bridged engine.
+ * This object must be kept in sync with `sync15::IncomingBso`.
+ *
+ * @return {Object} The incoming envelope, to pass to
+ * `mozIBridgedSyncEngine::storeIncoming`.
+ */
+ toIncomingBso() {
+ return {
+ id: this.data.id,
+ modified: this.data.modified,
+ payload: this.cleartext,
+ };
+ }
+}
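+
+// A minimal usage sketch (collection name and payload are illustrative, not
+// from this module):
+//
+//   let bso = { id: "record1", payload: JSON.stringify({ title: "..." }) };
+//   let record = BridgedRecord.fromOutgoingBso("tabs", bso);
+//   // record.cleartext is the raw payload string, ready for encryption.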
+
+class BridgeError extends Error {
+ constructor(code, message) {
+ super(message);
+ this.name = "BridgeError";
+ // TODO: We may want to use a different name for this, since errors with
+ // a `result` property are treated specially by telemetry, discarding the
+ // message...but, unlike other `nserror`s, the message is actually useful,
+ // and we still want to capture it.
+ this.result = code;
+ }
+}
+
+class InterruptedError extends Error {
+ constructor(message) {
+ super(message);
+ this.name = "InterruptedError";
+ }
+}
+
+/**
+ * Adapts a `Log.sys.mjs` logger to a `mozIServicesLogSink`. This class is copied
+ * from `SyncedBookmarksMirror.jsm`.
+ */
+export class LogAdapter {
+ constructor(log) {
+ this.log = log;
+ }
+
+ get maxLevel() {
+ let level = this.log.level;
+ if (level <= lazy.Log.Level.All) {
+ return Ci.mozIServicesLogSink.LEVEL_TRACE;
+ }
+ if (level <= lazy.Log.Level.Info) {
+ return Ci.mozIServicesLogSink.LEVEL_DEBUG;
+ }
+ if (level <= lazy.Log.Level.Warn) {
+ return Ci.mozIServicesLogSink.LEVEL_WARN;
+ }
+ if (level <= lazy.Log.Level.Error) {
+ return Ci.mozIServicesLogSink.LEVEL_ERROR;
+ }
+ return Ci.mozIServicesLogSink.LEVEL_OFF;
+ }
+
+ trace(message) {
+ this.log.trace(message);
+ }
+
+ debug(message) {
+ this.log.debug(message);
+ }
+
+ warn(message) {
+ this.log.warn(message);
+ }
+
+ error(message) {
+ this.log.error(message);
+ }
+}
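+
+// Example (logger name is illustrative): wrapping a `Log.sys.mjs` logger so
+// Rust code expecting a `mozIServicesLogSink` can use it:
+//
+//   let log = lazy.Log.repository.getLogger("Sync.Engine.Example");
+//   let sink = new LogAdapter(log); // hand `sink` across the bridge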
+
+// This converts the XPCOM-defined, callback-based mozIBridgedSyncEngine to
+// a promise-based implementation.
+export class BridgeWrapperXPCOM {
+ constructor(component) {
+ this.comp = component;
+ }
+
+ // A few sync, non-callback based attributes.
+ get storageVersion() {
+ return this.comp.storageVersion;
+ }
+
+ get allowSkippedRecord() {
+ return this.comp.allowSkippedRecord;
+ }
+
+ get logger() {
+ return this.comp.logger;
+ }
+
+ // And the async functions we promisify.
+ // Note this is `lastSync` via UniFFI but `getLastSync` via XPCOM
+ lastSync() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.getLastSync);
+ }
+
+ setLastSync(lastSyncMillis) {
+ return BridgeWrapperXPCOM.#promisify(this.comp.setLastSync, lastSyncMillis);
+ }
+
+ getSyncId() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.getSyncId);
+ }
+
+ resetSyncId() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.resetSyncId);
+ }
+
+ ensureCurrentSyncId(newSyncId) {
+ return BridgeWrapperXPCOM.#promisify(
+ this.comp.ensureCurrentSyncId,
+ newSyncId
+ );
+ }
+
+ syncStarted() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.syncStarted);
+ }
+
+ storeIncoming(incomingEnvelopesAsJSON) {
+ return BridgeWrapperXPCOM.#promisify(
+ this.comp.storeIncoming,
+ incomingEnvelopesAsJSON
+ );
+ }
+
+ apply() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.apply);
+ }
+
+ setUploaded(newTimestampMillis, uploadedIds) {
+ return BridgeWrapperXPCOM.#promisify(
+ this.comp.setUploaded,
+ newTimestampMillis,
+ uploadedIds
+ );
+ }
+
+ syncFinished() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.syncFinished);
+ }
+
+ reset() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.reset);
+ }
+
+ wipe() {
+ return BridgeWrapperXPCOM.#promisify(this.comp.wipe);
+ }
+
+ // Converts a XPCOM bridged function that takes a callback into one that returns a
+ // promise.
+ static #promisify(func, ...params) {
+ return new Promise((resolve, reject) => {
+ func(...params, {
+ // This object implicitly implements the callback interfaces
+ // (`mozIBridgedSyncEngine{Apply, Result}Callback`), because they have
+ // the same methods. The only difference is the type of the argument
+ // passed to `handleSuccess`, which doesn't matter in JS.
+ handleSuccess: resolve,
+ handleError(code, message) {
+ reject(transformError(code, message));
+ },
+ });
+ });
+ }
+}
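+
+// Usage sketch (the component variable is hypothetical): wrapping an XPCOM
+// bridged engine so callers can await its methods instead of passing
+// callbacks:
+//
+//   let bridge = new BridgeWrapperXPCOM(xpcomEngineComponent);
+//   let lastSyncMillis = await bridge.lastSync();
+//   await bridge.setLastSync(lastSyncMillis);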
+
+/**
+ * A base class used to plug a Rust engine into Sync, and have it work like any
+ * other engine. The constructor takes a bridge as its first argument, which is
+ * a "bridged sync engine", as defined by UniFFI in the application-services
+ * crate.
+ * For backwards compatibility, this can also be an instance of an XPCOM
+ * component class that implements `mozIBridgedSyncEngine`, wrapped in
+ * a `BridgeWrapperXPCOM` wrapper.
+ * (Note that at the time of writing, the above is slightly aspirational; the
+ * actual definition of the UniFFI shared bridged engine is still in flux.)
+ *
+ * This class inherits from `SyncEngine`, which has a lot of machinery that we
+ * don't need, but that's fairly easy to override. It would be harder to
+ * reimplement the machinery that we _do_ need here. However, because of that,
+ * this class has lots of methods that do nothing, or return empty data. The
+ * docs above each method explain what it's overriding, and why.
+ *
+ * This class is designed to be subclassed, but the only part that your engine
+ * may want to override is `_trackerObj`. Even then, using the default (no-op)
+ * tracker is fine, because the shape of the `Tracker` interface may not make
+ * sense for all engines.
+ */
+export function BridgedEngine(name, service) {
+ SyncEngine.call(this, name, service);
+}
+
+BridgedEngine.prototype = {
+ /**
+ * The Rust implemented bridge. Must be set by the engine which subclasses us.
+ */
+ _bridge: null,
+ /**
+ * The tracker class for this engine. Subclasses may want to override this
+ * with their own tracker, though using the default `Tracker` is fine.
+ */
+ _trackerObj: Tracker,
+
+ /** Returns the record class for all bridged engines. */
+ get _recordObj() {
+ return BridgedRecord;
+ },
+
+ set _recordObj(obj) {
+ throw new TypeError("Don't override the record class for bridged engines");
+ },
+
+ /** Returns the store class for all bridged engines. */
+ get _storeObj() {
+ return BridgedStore;
+ },
+
+ set _storeObj(obj) {
+ throw new TypeError("Don't override the store class for bridged engines");
+ },
+
+ /** Returns the storage version for this engine. */
+ get version() {
+ return this._bridge.storageVersion;
+ },
+
+ // Legacy engines allow sync to proceed if some records are too large to
+ // upload (eg, a payload that's bigger than the server's published limits).
+ // If this returns true, we will just skip the record without even attempting
+ // to upload. If this is false, we'll abort the entire batch.
+ // If the engine allows this, it will need to detect this scenario by noticing
+ // the ID is not in the 'success' records reported to `setUploaded`.
+ // (Note that this is not to be confused with the fact that servers can
+ // currently reject records as part of a POST, though we hope to remove this
+ // ability from the server API. Note also that this is not bullet-proof: if
+ // the count of
+ // records is high, it's possible that we will have committed a previous
+ // batch before we hit the relevant limits, so things might have been written.
+ // We hope to fix this by ensuring batch limits are such that this is
+ // impossible)
+ get allowSkippedRecord() {
+ return this._bridge.allowSkippedRecord;
+ },
+
+ /**
+ * Returns the sync ID for this engine. This is exposed for tests, but
+ * Sync code always calls `resetSyncID()` and `ensureCurrentSyncID()`,
+ * not this.
+ *
+ * @returns {String?} The sync ID, or `null` if one isn't set.
+ */
+ async getSyncID() {
+ // Note that all methods on an XPCOM class instance are automatically bound,
+ // so we don't need to write `this._bridge.getSyncId.bind(this._bridge)`.
+ let syncID = await this._bridge.getSyncId();
+ return syncID;
+ },
+
+ async resetSyncID() {
+ await this._deleteServerCollection();
+ let newSyncID = await this.resetLocalSyncID();
+ return newSyncID;
+ },
+
+ async resetLocalSyncID() {
+ let newSyncID = await this._bridge.resetSyncId();
+ return newSyncID;
+ },
+
+ async ensureCurrentSyncID(newSyncID) {
+ let assignedSyncID = await this._bridge.ensureCurrentSyncId(newSyncID);
+ return assignedSyncID;
+ },
+
+ async getLastSync() {
+ // The bridge defines lastSync as integer milliseconds, but Sync itself
+ // works in float seconds with 2 decimal places.
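+ // For example, a bridge value of 1634567890123 ms becomes 1634567890.12 s.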
+ let lastSyncMS = await this._bridge.lastSync();
+ return Math.round(lastSyncMS / 10) / 100;
+ },
+
+ async setLastSync(lastSyncSeconds) {
+ await this._bridge.setLastSync(Math.round(lastSyncSeconds * 1000));
+ },
+
+ /**
+ * Returns the initial changeset for the sync. Bridged engines handle
+ * reconciliation internally, so we don't know what changed until after we've
+ * stored and applied all incoming records. So we return an empty changeset
+ * here, and replace it with the real one in `_processIncoming`.
+ */
+ async pullChanges() {
+ return {};
+ },
+
+ async trackRemainingChanges() {
+ await this._bridge.syncFinished();
+ },
+
+ /**
+ * Marks a record for a hard-`DELETE` at the end of the sync. The base method
+ * also removes it from the tracker, but we don't use the tracker for that,
+ * so we override the method to just mark.
+ */
+ _deleteId(id) {
+ this._noteDeletedId(id);
+ },
+
+ /**
+ * Always stage incoming records, bypassing the base engine's reconciliation
+ * machinery.
+ */
+ async _reconcile() {
+ return true;
+ },
+
+ async _syncStartup() {
+ await super._syncStartup();
+ await this._bridge.syncStarted();
+ },
+
+ async _processIncoming(newitems) {
+ await super._processIncoming(newitems);
+
+ let outgoingBsosAsJSON = await this._bridge.apply();
+ let changeset = {};
+ for (let bsoAsJSON of outgoingBsosAsJSON) {
+ this._log.trace("outgoing bso", bsoAsJSON);
+ let record = BridgedRecord.fromOutgoingBso(
+ this.name,
+ JSON.parse(bsoAsJSON)
+ );
+ changeset[record.id] = {
+ synced: false,
+ record,
+ };
+ }
+ this._modified.replace(changeset);
+ },
+
+ /**
+ * Notify the bridged engine that we've successfully uploaded a batch, so
+ * that it can update its local state. For example, if the engine uses a
+ * mirror and a temp table for outgoing records, it can write the uploaded
+ * records from the outgoing table back to the mirror.
+ */
+ async _onRecordsWritten(succeeded, failed, serverModifiedTime) {
+ // JS uses seconds but Rust uses milliseconds, so we'll need to convert.
+ let serverModifiedMS = Math.round(serverModifiedTime * 1000);
+ await this._bridge.setUploaded(Math.floor(serverModifiedMS), succeeded);
+ },
+
+ async _createTombstone() {
+ throw new Error("Bridged engines don't support weak uploads");
+ },
+
+ async _createRecord(id) {
+ let change = this._modified.changes[id];
+ if (!change) {
+ throw new TypeError("Can't create record for unchanged item");
+ }
+ return change.record;
+ },
+
+ async _resetClient() {
+ await super._resetClient();
+ await this._bridge.reset();
+ },
+};
+Object.setPrototypeOf(BridgedEngine.prototype, SyncEngine.prototype);
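+
+// A minimal subclassing sketch (engine and bridge names are hypothetical):
+//
+//   export function MyRustEngine(service) {
+//     BridgedEngine.call(this, "MyRust", service);
+//     this._bridge = myUniFFIBridge; // or new BridgeWrapperXPCOM(component)
+//   }
+//   Object.setPrototypeOf(MyRustEngine.prototype, BridgedEngine.prototype);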
+
+function transformError(code, message) {
+ switch (code) {
+ case Cr.NS_ERROR_ABORT:
+ return new InterruptedError(message);
+
+ default:
+ return new BridgeError(code, message);
+ }
+}
diff --git a/services/sync/modules/collection_validator.sys.mjs b/services/sync/modules/collection_validator.sys.mjs
new file mode 100644
index 0000000000..a64ede10e9
--- /dev/null
+++ b/services/sync/modules/collection_validator.sys.mjs
@@ -0,0 +1,267 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ Async: "resource://services-common/async.sys.mjs",
+});
+
+export class CollectionProblemData {
+ constructor() {
+ this.missingIDs = 0;
+ this.clientDuplicates = [];
+ this.duplicates = [];
+ this.clientMissing = [];
+ this.serverMissing = [];
+ this.serverDeleted = [];
+ this.serverUnexpected = [];
+ this.differences = [];
+ }
+
+ /**
+ * Produce a list summarizing problems found. Each entry contains {name, count},
+ * where name is the field name for the problem, and count is the number of times
+ * the problem was encountered.
+ *
+ * Validation has failed if any count is not 0.
+ */
+ getSummary() {
+ return [
+ { name: "clientMissing", count: this.clientMissing.length },
+ { name: "serverMissing", count: this.serverMissing.length },
+ { name: "serverDeleted", count: this.serverDeleted.length },
+ { name: "serverUnexpected", count: this.serverUnexpected.length },
+ { name: "differences", count: this.differences.length },
+ { name: "missingIDs", count: this.missingIDs },
+ { name: "clientDuplicates", count: this.clientDuplicates.length },
+ { name: "duplicates", count: this.duplicates.length },
+ ];
+ }
+}
+
+export class CollectionValidator {
+ // Construct a generic collection validator. This is intended to be called by
+ // subclasses.
+ // - name: Name of the engine
+ // - idProp: Property that identifies a record. That is, if a client and server
+ // record have the same value for the idProp property, they should be
+ // compared against each other.
+ // - props: Array of properties that should be compared
+ constructor(name, idProp, props) {
+ this.name = name;
+ this.props = props;
+ this.idProp = idProp;
+
+ // This property deals with the fact that form history records are never
+ // deleted from the server. The FormValidator subclass needs to ignore
+ // missing client records, and it uses this property to do so
+ // (Bug 1354016).
+ this.ignoresMissingClients = false;
+ }
+
+ // Should a custom ProblemData type be needed, return it here.
+ emptyProblemData() {
+ return new CollectionProblemData();
+ }
+
+ async getServerItems(engine) {
+ let collection = engine.itemSource();
+ let collectionKey = engine.service.collectionKeys.keyForCollection(
+ engine.name
+ );
+ collection.full = true;
+ let result = await collection.getBatched();
+ if (!result.response.success) {
+ throw result.response;
+ }
+ let cleartexts = [];
+
+ await lazy.Async.yieldingForEach(result.records, async record => {
+ await record.decrypt(collectionKey);
+ cleartexts.push(record.cleartext);
+ });
+
+ return cleartexts;
+ }
+
+ // Should return a promise that resolves to an array of client items.
+ getClientItems() {
+ return Promise.reject("Must implement");
+ }
+
+ /**
+ * Could validation fail for a reason that isn't actually a problem? For
+ * example, if we know there are pending changes left over from the last
+ * sync, this should resolve to false, since validation would report
+ * spurious differences. By default resolves to true.
+ */
+ async canValidate() {
+ return true;
+ }
+
+ // Turn the client item into something that can be compared with the server item,
+ // and is also safe to mutate.
+ normalizeClientItem(item) {
+ return Cu.cloneInto(item, {});
+ }
+
+ // Turn the server item into something that can be easily compared with the client
+ // items.
+ async normalizeServerItem(item) {
+ return item;
+ }
+
+ // Return whether or not a server item should be present on the client. Expected
+ // to be overridden.
+ clientUnderstands(item) {
+ return true;
+ }
+
+ // Return whether or not a client item should be present on the server. Expected
+ // to be overridden.
+ async syncedByClient(item) {
+ return true;
+ }
+
+ // Compare the server item and the client item, and return a list of property
+ // names that are different. Can be overridden if needed.
+ getDifferences(client, server) {
+ let differences = [];
+ for (let prop of this.props) {
+ let clientProp = client[prop];
+ let serverProp = server[prop];
+ if ((clientProp || "") !== (serverProp || "")) {
+ differences.push(prop);
+ }
+ }
+ return differences;
+ }
+
+ // Returns an object containing
+ // problemData: an instance of the class returned by emptyProblemData(),
+ // clientRecords: Normalized client records
+ // records: Normalized server records,
+ // deletedRecords: Array of ids that were marked as deleted by the server.
+ async compareClientWithServer(clientItems, serverItems) {
+ const yieldState = lazy.Async.yieldState();
+
+ const clientRecords = [];
+
+ await lazy.Async.yieldingForEach(
+ clientItems,
+ item => {
+ clientRecords.push(this.normalizeClientItem(item));
+ },
+ yieldState
+ );
+
+ const serverRecords = [];
+ await lazy.Async.yieldingForEach(
+ serverItems,
+ async item => {
+ serverRecords.push(await this.normalizeServerItem(item));
+ },
+ yieldState
+ );
+
+ let problems = this.emptyProblemData();
+ let seenServer = new Map();
+ let serverDeleted = new Set();
+ let allRecords = new Map();
+
+ for (let record of serverRecords) {
+ let id = record[this.idProp];
+ if (!id) {
+ ++problems.missingIDs;
+ continue;
+ }
+ if (record.deleted) {
+ serverDeleted.add(record);
+ } else {
+ let serverHasPossibleDupe = seenServer.has(id);
+ if (serverHasPossibleDupe) {
+ problems.duplicates.push(id);
+ } else {
+ seenServer.set(id, record);
+ allRecords.set(id, { server: record, client: null });
+ }
+ record.understood = this.clientUnderstands(record);
+ }
+ }
+
+ let seenClient = new Map();
+ for (let record of clientRecords) {
+ let id = record[this.idProp];
+ record.shouldSync = await this.syncedByClient(record);
+ let clientHasPossibleDupe = seenClient.has(id);
+ if (clientHasPossibleDupe && record.shouldSync) {
+ // Only report duplicate client IDs for syncable records.
+ problems.clientDuplicates.push(id);
+ continue;
+ }
+ seenClient.set(id, record);
+ let combined = allRecords.get(id);
+ if (combined) {
+ combined.client = record;
+ } else {
+ allRecords.set(id, { client: record, server: null });
+ }
+ }
+
+ for (let [id, { server, client }] of allRecords) {
+ if (!client && !server) {
+ throw new Error("Impossible: no client or server record for " + id);
+ } else if (server && !client) {
+ if (!this.ignoresMissingClients && server.understood) {
+ problems.clientMissing.push(id);
+ }
+ } else if (client && !server) {
+ if (client.shouldSync) {
+ problems.serverMissing.push(id);
+ }
+ } else {
+ if (!client.shouldSync) {
+ if (!problems.serverUnexpected.includes(id)) {
+ problems.serverUnexpected.push(id);
+ }
+ continue;
+ }
+ let differences = this.getDifferences(client, server);
+ if (differences && differences.length) {
+ problems.differences.push({ id, differences });
+ }
+ }
+ }
+ return {
+ problemData: problems,
+ clientRecords,
+ records: serverRecords,
+ deletedRecords: [...serverDeleted],
+ };
+ }
+
+ async validate(engine) {
+ let start = Cu.now();
+ let clientItems = await this.getClientItems();
+ let serverItems = await this.getServerItems(engine);
+ let serverRecordCount = serverItems.length;
+ let result = await this.compareClientWithServer(clientItems, serverItems);
+ let end = Cu.now();
+ let duration = end - start;
+ engine._log.debug(`Validated ${this.name} in ${duration}ms`);
+ engine._log.debug(`Problem summary`);
+ for (let { name, count } of result.problemData.getSummary()) {
+ engine._log.debug(` ${name}: ${count}`);
+ }
+ return {
+ duration,
+ version: this.version,
+ problems: result.problemData,
+ recordCount: serverRecordCount,
+ };
+ }
+}
+
+// Default to 0, some engines may override.
+CollectionValidator.prototype.version = 0;
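+
+// A minimal subclass sketch (engine name, compared properties, and the helper
+// for fetching client records are all illustrative):
+//
+//   class ExampleValidator extends CollectionValidator {
+//     constructor() {
+//       super("example", "guid", ["name", "value"]);
+//     }
+//     async getClientItems() {
+//       return fetchLocalRecordsSomehow(); // hypothetical helper
+//     }
+//   }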
diff --git a/services/sync/modules/constants.sys.mjs b/services/sync/modules/constants.sys.mjs
new file mode 100644
index 0000000000..5db9f53a24
--- /dev/null
+++ b/services/sync/modules/constants.sys.mjs
@@ -0,0 +1,133 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Don't manually modify this line, as it is automatically replaced on merge day
+// by the gecko_migration.py script.
+export const WEAVE_VERSION = "1.117.0";
+
+// Sync Server API version that the client supports.
+export const SYNC_API_VERSION = "1.5";
+
+// Version of the data format this client supports. The data format describes
+// how records are packaged; this is separate from the Server API version and
+// the per-engine cleartext formats.
+export const STORAGE_VERSION = 5;
+export const PREFS_BRANCH = "services.sync.";
+
+// Put in [] because those aren't allowed in a collection name.
+export const DEFAULT_KEYBUNDLE_NAME = "[default]";
+
+// Key dimensions.
+export const SYNC_KEY_ENCODED_LENGTH = 26;
+export const SYNC_KEY_DECODED_LENGTH = 16;
+
+export const NO_SYNC_NODE_INTERVAL = 10 * 60 * 1000; // 10 minutes
+
+export const MAX_ERROR_COUNT_BEFORE_BACKOFF = 3;
+
+// Backoff intervals
+export const MINIMUM_BACKOFF_INTERVAL = 15 * 60 * 1000; // 15 minutes
+export const MAXIMUM_BACKOFF_INTERVAL = 8 * 60 * 60 * 1000; // 8 hours
+
+// HMAC event handling timeout.
+// 10 minutes = a compromise between the multi-desktop sync interval
+// and the mobile sync interval.
+export const HMAC_EVENT_INTERVAL = 600000;
+
+// How long to wait between sync attempts if the Master Password is locked.
+export const MASTER_PASSWORD_LOCKED_RETRY_INTERVAL = 15 * 60 * 1000; // 15 minutes
+
+// 50 is hardcoded here because of URL length restrictions.
+// (GUIDs can be up to 64 chars long.)
+// Individual engines can set different values for their limit if their
+// identifiers are shorter.
+export const DEFAULT_GUID_FETCH_BATCH_SIZE = 50;
+
+// Default batch size for download batching
+// (how many records are fetched at a time from the server when batching is used).
+export const DEFAULT_DOWNLOAD_BATCH_SIZE = 1000;
+
+// score thresholds for early syncs
+export const SINGLE_USER_THRESHOLD = 1000;
+export const MULTI_DEVICE_THRESHOLD = 300;
+
+// Other score increment constants
+export const SCORE_INCREMENT_SMALL = 1;
+export const SCORE_INCREMENT_MEDIUM = 10;
+
+// Instant sync score increment
+export const SCORE_INCREMENT_XLARGE = 300 + 1; // MULTI_DEVICE_THRESHOLD + 1
+
+// Delay before incrementing global score
+export const SCORE_UPDATE_DELAY = 100;
+
+// Delay for the back observer debouncer. This is chosen to be longer than any
+// observed spurious idle/back events and short enough to pre-empt user activity.
+export const IDLE_OBSERVER_BACK_DELAY = 100;
+
+// Duplicate URI_LENGTH_MAX from Places (from nsNavHistory.h), used to discard
+// tabs with huge uris during tab sync.
+export const URI_LENGTH_MAX = 65536;
+
+export const MAX_HISTORY_UPLOAD = 5000;
+export const MAX_HISTORY_DOWNLOAD = 5000;
+
+// Top-level statuses
+export const STATUS_OK = "success.status_ok";
+export const SYNC_FAILED = "error.sync.failed";
+export const LOGIN_FAILED = "error.login.failed";
+export const SYNC_FAILED_PARTIAL = "error.sync.failed_partial";
+export const CLIENT_NOT_CONFIGURED = "service.client_not_configured";
+export const STATUS_DISABLED = "service.disabled";
+export const MASTER_PASSWORD_LOCKED = "service.master_password_locked";
+
+// success states
+export const LOGIN_SUCCEEDED = "success.login";
+export const SYNC_SUCCEEDED = "success.sync";
+export const ENGINE_SUCCEEDED = "success.engine";
+
+// login failure status codes
+export const LOGIN_FAILED_NO_USERNAME = "error.login.reason.no_username";
+export const LOGIN_FAILED_NO_PASSPHRASE = "error.login.reason.no_recoverykey";
+export const LOGIN_FAILED_NETWORK_ERROR = "error.login.reason.network";
+export const LOGIN_FAILED_SERVER_ERROR = "error.login.reason.server";
+export const LOGIN_FAILED_INVALID_PASSPHRASE = "error.login.reason.recoverykey";
+export const LOGIN_FAILED_LOGIN_REJECTED = "error.login.reason.account";
+
+// sync failure status codes
+export const METARECORD_DOWNLOAD_FAIL =
+ "error.sync.reason.metarecord_download_fail";
+export const VERSION_OUT_OF_DATE = "error.sync.reason.version_out_of_date";
+export const CREDENTIALS_CHANGED = "error.sync.reason.credentials_changed";
+export const ABORT_SYNC_COMMAND = "aborting sync, process commands said so";
+export const NO_SYNC_NODE_FOUND = "error.sync.reason.no_node_found";
+export const OVER_QUOTA = "error.sync.reason.over_quota";
+export const SERVER_MAINTENANCE = "error.sync.reason.serverMaintenance";
+
+export const RESPONSE_OVER_QUOTA = "14";
+
+// engine failure status codes
+export const ENGINE_UPLOAD_FAIL = "error.engine.reason.record_upload_fail";
+export const ENGINE_DOWNLOAD_FAIL = "error.engine.reason.record_download_fail";
+export const ENGINE_UNKNOWN_FAIL = "error.engine.reason.unknown_fail";
+export const ENGINE_APPLY_FAIL = "error.engine.reason.apply_fail";
+// an upload failure where the batch was interrupted with a 412
+export const ENGINE_BATCH_INTERRUPTED = "error.engine.reason.batch_interrupted";
+
+// Ways that a sync can be disabled (messages only to be printed in debug log)
+export const kSyncMasterPasswordLocked =
+ "User elected to leave Primary Password locked";
+export const kSyncWeaveDisabled = "Weave is disabled";
+export const kSyncNetworkOffline = "Network is offline";
+export const kSyncBackoffNotMet =
+ "Trying to sync before the server said it's okay";
+export const kFirstSyncChoiceNotMade =
+ "User has not selected an action for first sync";
+export const kSyncNotConfigured = "Sync is not configured";
+export const kFirefoxShuttingDown = "Firefox is about to shut down";
+
+export const DEVICE_TYPE_DESKTOP = "desktop";
+export const DEVICE_TYPE_MOBILE = "mobile";
+
+export const SQLITE_MAX_VARIABLE_NUMBER = 999;
diff --git a/services/sync/modules/doctor.sys.mjs b/services/sync/modules/doctor.sys.mjs
new file mode 100644
index 0000000000..fe9385c3ff
--- /dev/null
+++ b/services/sync/modules/doctor.sys.mjs
@@ -0,0 +1,188 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// A doctor for our collections. She can be asked to make a consultation, and
+// may just diagnose an issue without attempting to cure it, may diagnose and
+// attempt to cure, or may decide she is overworked and underpaid.
+// Or something - naming is hard :)
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+import { Observers } from "resource://services-common/observers.sys.mjs";
+import { Service } from "resource://services-sync/service.sys.mjs";
+import { Resource } from "resource://services-sync/resource.sys.mjs";
+import { Svc } from "resource://services-sync/util.sys.mjs";
+
+const log = Log.repository.getLogger("Sync.Doctor");
+
+export var Doctor = {
+ async consult(recentlySyncedEngines) {
+ if (!Services.telemetry.canRecordBase) {
+ log.info("Skipping consultation: telemetry reporting is disabled");
+ return;
+ }
+
+ let engineInfos = this._getEnginesToValidate(recentlySyncedEngines);
+
+ await this._runValidators(engineInfos);
+ },
+
+ _getEnginesToValidate(recentlySyncedEngines) {
+ let result = {};
+ for (let e of recentlySyncedEngines) {
+ let prefPrefix = `engine.${e.name}.`;
+ if (!Svc.Prefs.get(prefPrefix + "validation.enabled", false)) {
+ log.info(`Skipping check of ${e.name} - disabled via preferences`);
+ continue;
+ }
+ // Check the last validation time for the engine.
+ let lastValidation = Svc.Prefs.get(prefPrefix + "validation.lastTime", 0);
+ let validationInterval = Svc.Prefs.get(
+ prefPrefix + "validation.interval"
+ );
+ let nowSeconds = this._now();
+
+ if (nowSeconds - lastValidation < validationInterval) {
+ log.info(
+ `Skipping validation of ${e.name}: last validation attempt was too recent`
+ );
+ continue;
+ }
+ // Update the time now, even if we decline to actually perform a
+ // validation. We don't want to check the rest of these more frequently
+ // than once a day.
+ Svc.Prefs.set(prefPrefix + "validation.lastTime", Math.floor(nowSeconds));
+
+ // Validation only occurs a certain percentage of the time.
+ let validationProbability =
+ Svc.Prefs.get(prefPrefix + "validation.percentageChance", 0) / 100.0;
+ if (validationProbability < Math.random()) {
+ log.info(
+ `Skipping validation of ${e.name}: Probability threshold not met`
+ );
+ continue;
+ }
+
+ let maxRecords = Svc.Prefs.get(prefPrefix + "validation.maxRecords");
+ if (!maxRecords) {
+ log.info(`Skipping validation of ${e.name}: No maxRecords specified`);
+ continue;
+ }
+ // OK, so this is a candidate - the final decision will be based on the
+ // number of records actually found.
+ result[e.name] = { engine: e, maxRecords };
+ }
+ return result;
+ },
+
+ async _runValidators(engineInfos) {
+ if (!Object.keys(engineInfos).length) {
+ log.info("Skipping validation: no engines qualify");
+ return;
+ }
+
+ if (Object.values(engineInfos).filter(i => i.maxRecords != -1).length) {
+ // At least some of the engines have maxRecords restrictions which require
+ // us to ask the server for the counts.
+ let countInfo = await this._fetchCollectionCounts();
+ for (let [engineName, recordCount] of Object.entries(countInfo)) {
+ if (engineName in engineInfos) {
+ engineInfos[engineName].recordCount = recordCount;
+ }
+ }
+ }
+
+ for (let [
+ engineName,
+ { engine, maxRecords, recordCount },
+ ] of Object.entries(engineInfos)) {
+ // maxRecords of -1 means "any number", so we can skip asking the server.
+ // Used for tests.
+ if (maxRecords >= 0 && recordCount > maxRecords) {
+ log.debug(
+ `Skipping validation for ${engineName} because ` +
+ `the number of records (${recordCount}) is greater ` +
+ `than the maximum allowed (${maxRecords}).`
+ );
+ continue;
+ }
+ let validator = engine.getValidator();
+ if (!validator) {
+ // This is probably only possible in profile downgrade cases.
+ log.warn(
+ `engine.getValidator returned null for ${engineName} but the pref that controls validation is enabled.`
+ );
+ continue;
+ }
+
+ if (!(await validator.canValidate())) {
+ log.debug(
+ `Skipping validation for ${engineName} because validator.canValidate() is false`
+ );
+ continue;
+ }
+
+ // Let's do it!
+ Services.console.logStringMessage(
+ `Sync is about to run a consistency check of ${engine.name}. This may be slow, and ` +
+ `can be controlled using the pref "services.sync.${engine.name}.validation.enabled".\n` +
+ `If you encounter any problems because of this, please file a bug.`
+ );
+
+ try {
+ log.info(`Running validator for ${engine.name}`);
+ let result = await validator.validate(engine);
+ let { problems, version, duration, recordCount } = result;
+ Observers.notify(
+ "weave:engine:validate:finish",
+ {
+ version,
+ checked: recordCount,
+ took: duration,
+ problems: problems ? problems.getSummary(true) : null,
+ },
+ engine.name
+ );
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ log.error(`Failed to run validation on ${engine.name}!`, ex);
+ Observers.notify("weave:engine:validate:error", ex, engine.name);
+ // Keep validating -- there's no reason to think that a failure for one
+ // validator would mean the others will fail.
+ }
+ }
+ },
+
+ // mainly for mocking.
+ async _fetchCollectionCounts() {
+ let collectionCountsURL = Service.userBaseURL + "info/collection_counts";
+ try {
+ let infoResp = await Service._fetchInfo(collectionCountsURL);
+ if (!infoResp.success) {
+ log.error(
+ "Can't fetch collection counts: request to info/collection_counts responded with " +
+ infoResp.status
+ );
+ return {};
+ }
+ return infoResp.obj; // might throw because obj is a getter which parses json.
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ // Not running validation is totally fine, so we just write an error log and return.
+ log.error("Caught error when fetching counts", ex);
+ return {};
+ }
+ },
+
+ // functions used so tests can mock them
+ _now() {
+ // We use the server time, which is SECONDS
+ return Resource.serverTime;
+ },
+};
diff --git a/services/sync/modules/engines.sys.mjs b/services/sync/modules/engines.sys.mjs
new file mode 100644
index 0000000000..dc3fa185b2
--- /dev/null
+++ b/services/sync/modules/engines.sys.mjs
@@ -0,0 +1,2260 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+import { JSONFile } from "resource://gre/modules/JSONFile.sys.mjs";
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+import { Observers } from "resource://services-common/observers.sys.mjs";
+
+import {
+ DEFAULT_DOWNLOAD_BATCH_SIZE,
+ DEFAULT_GUID_FETCH_BATCH_SIZE,
+ ENGINE_BATCH_INTERRUPTED,
+ ENGINE_DOWNLOAD_FAIL,
+ ENGINE_UPLOAD_FAIL,
+ VERSION_OUT_OF_DATE,
+} from "resource://services-sync/constants.sys.mjs";
+
+import {
+ Collection,
+ CryptoWrapper,
+} from "resource://services-sync/record.sys.mjs";
+import { Resource } from "resource://services-sync/resource.sys.mjs";
+import {
+ SerializableSet,
+ Svc,
+ Utils,
+} from "resource://services-sync/util.sys.mjs";
+import { SyncedRecordsTelemetry } from "resource://services-sync/telemetry.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ PlacesUtils: "resource://gre/modules/PlacesUtils.sys.mjs",
+});
+
+function ensureDirectory(path) {
+ return IOUtils.makeDirectory(PathUtils.parent(path), {
+ createAncestors: true,
+ });
+}
+
+/**
+ * Trackers are associated with a single engine and deal with
+ * listening for changes to their particular data type.
+ *
+ * The base `Tracker` only supports listening for changes, and bumping the score
+ * to indicate how urgently the engine wants to sync. It does not persist any
+ * data. Engines that track changes directly in the storage layer (like
+ * bookmarks, bridged engines, addresses, and credit cards) or only upload a
+ * single record (tabs and preferences) should subclass `Tracker`.
+ */
+export function Tracker(name, engine) {
+ if (!engine) {
+ throw new Error("Tracker must be associated with an Engine instance.");
+ }
+
+ name = name || "Unnamed";
+ this.name = name.toLowerCase();
+ this.engine = engine;
+
+ this._log = Log.repository.getLogger(`Sync.Engine.${name}.Tracker`);
+
+ this._score = 0;
+
+ this.asyncObserver = Async.asyncObserver(this, this._log);
+}
+
+Tracker.prototype = {
+ // New-style trackers use change sources to filter out changes made by Sync in
+ // observer notifications, so we don't want to let the engine ignore all
+ // changes during a sync.
+ get ignoreAll() {
+ return false;
+ },
+
+ // Define an empty setter so that the engine doesn't throw a `TypeError`
+ // setting a read-only property.
+ set ignoreAll(value) {},
+
+ /*
+ * Score can be called as often as desired to decide which engines to sync
+ *
+ * Valid values for score:
+ * -1: Do not sync unless the user specifically requests it (almost disabled)
+ * 0: Nothing has changed
+ * 100: Please sync me ASAP!
+ *
+ * Setting it to other values should (but doesn't currently) throw an exception
+ */
+ get score() {
+ return this._score;
+ },
+
+ set score(value) {
+ this._score = value;
+ Observers.notify("weave:engine:score:updated", this.name);
+ },
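+
+ // For example, an engine's tracker typically bumps the score from its
+ // observer, e.g. `this.score += SCORE_INCREMENT_MEDIUM;` (see
+ // constants.sys.mjs); once the total crosses SINGLE_USER_THRESHOLD or
+ // MULTI_DEVICE_THRESHOLD, an early sync is scheduled.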
+
+ // Should be called by the service every time a sync has been completed for an engine
+ resetScore() {
+ this._score = 0;
+ },
+
+ // Unsupported, and throws a more descriptive error to ensure callers aren't
+ // accidentally using persistence.
+ async getChangedIDs() {
+ throw new TypeError("This tracker doesn't store changed IDs");
+ },
+
+ // Also unsupported.
+ async addChangedID(id, when) {
+ throw new TypeError("Can't add changed ID to this tracker");
+ },
+
+ // Ditto.
+ async removeChangedID(...ids) {
+ throw new TypeError("Can't remove changed IDs from this tracker");
+ },
+
+ // This method is called at various times, so we override with a no-op
+ // instead of throwing.
+ clearChangedIDs() {},
+
+ _now() {
+ return Date.now() / 1000;
+ },
+
+ _isTracking: false,
+
+ start() {
+ if (!this.engineIsEnabled()) {
+ return;
+ }
+ this._log.trace("start().");
+ if (!this._isTracking) {
+ this.onStart();
+ this._isTracking = true;
+ }
+ },
+
+ async stop() {
+ this._log.trace("stop().");
+ if (this._isTracking) {
+ await this.asyncObserver.promiseObserversComplete();
+ this.onStop();
+ this._isTracking = false;
+ }
+ },
+
+ // Override these in your subclasses.
+ onStart() {},
+ onStop() {},
+ async observe(subject, topic, data) {},
+
+ engineIsEnabled() {
+ if (!this.engine) {
+ // Can't tell -- we must be running in a test!
+ return true;
+ }
+ return this.engine.enabled;
+ },
+
+ /**
+ * Starts or stops listening for changes depending on the associated engine's
+ * enabled state.
+ *
+ * @param {Boolean} engineEnabled Whether the engine was enabled.
+ */
+ async onEngineEnabledChanged(engineEnabled) {
+ if (engineEnabled == this._isTracking) {
+ return;
+ }
+
+ if (engineEnabled) {
+ this.start();
+ } else {
+ await this.stop();
+ this.clearChangedIDs();
+ }
+ },
+
+ async finalize() {
+ await this.stop();
+ },
+};
+
+/*
+ * A tracker that persists a list of IDs for all changed items that need to be
+ * synced. This is 🚨 _extremely deprecated_ 🚨 and only kept around for current
+ * engines. ⚠️ Please **don't use it** for new engines! ⚠️
+ *
+ * Why is this kind of external change tracking deprecated? Because it causes
+ * consistency issues due to missed notifications, interrupted syncs, and the
+ * tracker's view of what changed diverging from the data store's.
+ */
+export function LegacyTracker(name, engine) {
+ Tracker.call(this, name, engine);
+
+ this._ignored = [];
+ this.file = this.name;
+ this._storage = new JSONFile({
+ path: Utils.jsonFilePath("changes", this.file),
+ dataPostProcessor: json => this._dataPostProcessor(json),
+ beforeSave: () => this._beforeSave(),
+ });
+ this._ignoreAll = false;
+}
+
+LegacyTracker.prototype = {
+ get ignoreAll() {
+ return this._ignoreAll;
+ },
+
+ set ignoreAll(value) {
+ this._ignoreAll = value;
+ },
+
+ // Default to an empty object if the file doesn't exist.
+ _dataPostProcessor(json) {
+ return (typeof json == "object" && json) || {};
+ },
+
+ // Ensure the Weave storage directory exists before writing the file.
+ _beforeSave() {
+ return ensureDirectory(this._storage.path);
+ },
+
+ async getChangedIDs() {
+ await this._storage.load();
+ return this._storage.data;
+ },
+
+ _saveChangedIDs() {
+ this._storage.saveSoon();
+ },
+
+ // ignore/unignore specific IDs. Useful for ignoring items that are
+ // being processed, or that shouldn't be synced.
+ // But note: not persisted to disk
+
+ ignoreID(id) {
+ this.unignoreID(id);
+ this._ignored.push(id);
+ },
+
+ unignoreID(id) {
+ let index = this._ignored.indexOf(id);
+ if (index != -1) {
+ this._ignored.splice(index, 1);
+ }
+ },
+
+ async _saveChangedID(id, when) {
+ this._log.trace(`Adding changed ID: ${id}, ${JSON.stringify(when)}`);
+ const changedIDs = await this.getChangedIDs();
+ changedIDs[id] = when;
+ this._saveChangedIDs();
+ },
+
+ async addChangedID(id, when) {
+ if (!id) {
+ this._log.warn("Attempted to add undefined ID to tracker");
+ return false;
+ }
+
+ if (this.ignoreAll || this._ignored.includes(id)) {
+ return false;
+ }
+
+ // Default to the current time in seconds if no time is provided.
+ if (when == null) {
+ when = this._now();
+ }
+
+ const changedIDs = await this.getChangedIDs();
+ // Add/update the entry if we have a newer time.
+ if ((changedIDs[id] || -Infinity) < when) {
+ await this._saveChangedID(id, when);
+ }
+
+ return true;
+ },
+
+ async removeChangedID(...ids) {
+ if (!ids.length || this.ignoreAll) {
+ return false;
+ }
+ for (let id of ids) {
+ if (!id) {
+ this._log.warn("Attempted to remove undefined ID from tracker");
+ continue;
+ }
+ if (this._ignored.includes(id)) {
+ this._log.debug(`Not removing ignored ID ${id} from tracker`);
+ continue;
+ }
+ const changedIDs = await this.getChangedIDs();
+ if (changedIDs[id] != null) {
+ this._log.trace("Removing changed ID " + id);
+ delete changedIDs[id];
+ }
+ }
+ this._saveChangedIDs();
+ return true;
+ },
+
+ clearChangedIDs() {
+ this._log.trace("Clearing changed ID list");
+ this._storage.data = {};
+ this._saveChangedIDs();
+ },
+
+ async finalize() {
+ // Persist all pending tracked changes to disk, and wait for the final write
+ // to finish.
+ await super.finalize();
+ this._saveChangedIDs();
+ await this._storage.finalize();
+ },
+};
+Object.setPrototypeOf(LegacyTracker.prototype, Tracker.prototype);
+
+/**
+ * The Store serves as the interface between Sync and stored data.
+ *
+ * The name "store" is slightly a misnomer because it doesn't actually "store"
+ * anything. Instead, it serves as a gateway to something that actually does
+ * the "storing."
+ *
+ * The store is responsible for record management inside an engine. It tells
+ * Sync what items are available for Sync, converts items to and from Sync's
+ * record format, and applies records from Sync into changes on the underlying
+ * store.
+ *
+ * Store implementations require a number of functions to be implemented. These
+ * are all documented below.
+ *
+ * For stores that deal with many records or which have expensive store access
+ * routines, it is highly recommended to implement a custom applyIncomingBatch
+ * and/or applyIncoming function on top of the basic APIs.
+ */
+
+export function Store(name, engine) {
+ if (!engine) {
+ throw new Error("Store must be associated with an Engine instance.");
+ }
+
+ name = name || "Unnamed";
+ this.name = name.toLowerCase();
+ this.engine = engine;
+
+ this._log = Log.repository.getLogger(`Sync.Engine.${name}.Store`);
+
+ XPCOMUtils.defineLazyGetter(this, "_timer", function () {
+ return Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
+ });
+}
+
+Store.prototype = {
+ /**
+ * Apply multiple incoming records against the store.
+ *
+ * This is called with a set of incoming records to process. The function
+ * should look at each record, reconcile with the current local state, and
+ * make the local changes required to bring its state in alignment with the
+ * record.
+ *
+ * The default implementation simply iterates over all records and calls
+ * applyIncoming(). Store implementations may overwrite this function
+ * if desired.
+ *
+ * @param records Array of records to apply
+ * @param countTelemetry A SyncedRecordsTelemetry object that keeps track of failure reasons
+ * @return Array of record IDs which did not apply cleanly
+ */
+ async applyIncomingBatch(records, countTelemetry) {
+ let failed = [];
+
+ await Async.yieldingForEach(records, async record => {
+ try {
+ await this.applyIncoming(record);
+ } catch (ex) {
+ if (ex.code == SyncEngine.prototype.eEngineAbortApplyIncoming) {
+ // This kind of exception should have a 'cause' attribute, which is an
+ // originating exception.
+ // ex.cause will carry its stack with it when rethrown.
+ throw ex.cause;
+ }
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.warn("Failed to apply incoming record " + record.id, ex);
+ failed.push(record.id);
+ countTelemetry.addIncomingFailedReason(ex.message);
+ }
+ });
+
+ return failed;
+ },
+
+ /**
+ * Apply a single record against the store.
+ *
+ * This takes a single record and makes the local changes required so the
+ * local state matches what's in the record.
+ *
+ * The default implementation calls one of remove(), create(), or update()
+ * depending on the state obtained from the store itself. Store
+ * implementations may overwrite this function if desired.
+ *
+ * @param record
+ * Record to apply
+ */
+ async applyIncoming(record) {
+ if (record.deleted) {
+ await this.remove(record);
+ } else if (!(await this.itemExists(record.id))) {
+ await this.create(record);
+ } else {
+ await this.update(record);
+ }
+ },
+
+ // override these in derived objects
+
+ /**
+ * Create an item in the store from a record.
+ *
+ * This is called by the default implementation of applyIncoming(). If using
+ * applyIncomingBatch(), this won't be called unless your store calls it.
+ *
+ * @param record
+ * The store record to create an item from
+ */
+ async create(record) {
+ throw new Error("override create in a subclass");
+ },
+
+ /**
+ * Remove an item in the store from a record.
+ *
+ * This is called by the default implementation of applyIncoming(). If using
+ * applyIncomingBatch(), this won't be called unless your store calls it.
+ *
+ * @param record
+ * The store record to delete an item from
+ */
+ async remove(record) {
+ throw new Error("override remove in a subclass");
+ },
+
+ /**
+ * Update an item from a record.
+ *
+ * This is called by the default implementation of applyIncoming(). If using
+ * applyIncomingBatch(), this won't be called unless your store calls it.
+ *
+ * @param record
+ * The record to use to update an item from
+ */
+ async update(record) {
+ throw new Error("override update in a subclass");
+ },
+
+ /**
+ * Determine whether a record with the specified ID exists.
+ *
+ * Takes a string record ID and returns a boolean saying whether the record
+ * exists.
+ *
+ * @param id
+ * string record ID
+ * @return boolean indicating whether record exists locally
+ */
+ async itemExists(id) {
+ throw new Error("override itemExists in a subclass");
+ },
+
+ /**
+ * Create a record from the specified ID.
+ *
+ * If the ID is known, the record should be populated with metadata from
+ * the store. If the ID is not known, the record should be created with the
+ * deleted field set to true.
+ *
+ * @param id
+ * string record ID
+ * @param collection
+ * Collection to add record to. This is typically passed into the
+ * constructor for the newly-created record.
+ * @return record type for this engine
+ */
+ async createRecord(id, collection) {
+ throw new Error("override createRecord in a subclass");
+ },
+
+ /**
+ * Change the ID of a record.
+ *
+ * @param oldID
+ * string old/current record ID
+ * @param newID
+ * string new record ID
+ */
+ async changeItemID(oldID, newID) {
+ throw new Error("override changeItemID in a subclass");
+ },
+
+ /**
+ * Obtain the set of all known record IDs.
+ *
+ * @return Object with ID strings as keys and values of true. The values
+ * are ignored.
+ */
+ async getAllIDs() {
+ throw new Error("override getAllIDs in a subclass");
+ },
+
+ /**
+ * Wipe all data in the store.
+ *
+ * This function is called during remote wipes or when replacing local data
+ * with remote data.
+ *
+ * This function should delete all local data that the store is managing. It
+ * can be thought of as clearing out all state and restoring the "new
+ * browser" state.
+ */
+ async wipe() {
+ throw new Error("override wipe in a subclass");
+ },
+};
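+
+// A minimal in-memory Store sketch (names are illustrative); a real store
+// must also implement createRecord, changeItemID, getAllIDs, and wipe:
+//
+//   function ExampleStore(name, engine) {
+//     Store.call(this, name, engine);
+//     this._items = {};
+//   }
+//   ExampleStore.prototype = {
+//     async itemExists(id) { return id in this._items; },
+//     async create(record) { this._items[record.id] = record.cleartext; },
+//     async update(record) { this._items[record.id] = record.cleartext; },
+//     async remove(record) { delete this._items[record.id]; },
+//   };
+//   Object.setPrototypeOf(ExampleStore.prototype, Store.prototype);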
+
+export function EngineManager(service) {
+ this.service = service;
+
+ this._engines = {};
+
+ this._altEngineInfo = {};
+
+ // This will be populated by Service on startup.
+ this._declined = new Set();
+ this._log = Log.repository.getLogger("Sync.EngineManager");
+ this._log.manageLevelFromPref("services.sync.log.logger.service.engines");
+ // define the default level for all engine logs here (although each engine
+ // allows its level to be controlled via a specific, non-default pref)
+ Log.repository
+ .getLogger(`Sync.Engine`)
+ .manageLevelFromPref("services.sync.log.logger.engine");
+}
+
+EngineManager.prototype = {
+ get(name) {
+ // Return an array of engines if we have an array of names
+ if (Array.isArray(name)) {
+ let engines = [];
+ name.forEach(function (name) {
+ let engine = this.get(name);
+ if (engine) {
+ engines.push(engine);
+ }
+ }, this);
+ return engines;
+ }
+
+ return this._engines[name]; // Silently returns undefined for unknown names.
+ },
+
+ getAll() {
+ let engines = [];
+ for (let [, engine] of Object.entries(this._engines)) {
+ engines.push(engine);
+ }
+ return engines;
+ },
+
+ /**
+ * If a user has changed a pref that controls which variant of a sync engine
+ * for a given collection we use, unregister the old engine and register the
+ * new one.
+ *
+ * This is called by EngineSynchronizer before every sync.
+ */
+ async switchAlternatives() {
+ for (let [name, info] of Object.entries(this._altEngineInfo)) {
+ let prefValue = info.prefValue;
+ if (prefValue === info.lastValue) {
+ this._log.trace(
+ `No change for engine ${name} (${info.pref} is still ${prefValue})`
+ );
+ continue;
+ }
+ // Unregister the old engine, register the new one.
+ this._log.info(
+ `Switching ${name} engine ("${info.pref}" went from ${info.lastValue} => ${prefValue})`
+ );
+ try {
+ await this._removeAndFinalize(name);
+ } catch (e) {
+ this._log.warn(`Failed to remove previous ${name} engine...`, e);
+ }
+ let engineType = prefValue ? info.whenTrue : info.whenFalse;
+ try {
+ // If register throws, we'll try again next sync, but until then there
+ // won't be an engine registered for this collection.
+ await this.register(engineType);
+ info.lastValue = prefValue;
+ // Note: engineType.name is using Function.prototype.name.
+ this._log.info(`Switched the ${name} engine to use ${engineType.name}`);
+ } catch (e) {
+ this._log.warn(
+ `Switching the ${name} engine to use ${engineType.name} failed (couldn't register)`,
+ e
+ );
+ }
+ }
+ },
+
+ async registerAlternatives(name, pref, whenTrue, whenFalse) {
+ let info = { name, pref, whenTrue, whenFalse };
+
+ XPCOMUtils.defineLazyPreferenceGetter(info, "prefValue", pref, false);
+
+ let chosen = info.prefValue ? info.whenTrue : info.whenFalse;
+ info.lastValue = info.prefValue;
+ this._altEngineInfo[name] = info;
+
+ await this.register(chosen);
+ },
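+
+ // Usage sketch (the pref and engine classes are hypothetical): pick between
+ // two implementations of the same collection based on a boolean pref:
+ //
+ //   await engineManager.registerAlternatives(
+ //     "tabs",
+ //     "services.sync.engine.tabs.use-bridged",
+ //     BridgedTabsEngine, // registered while the pref is true
+ //     LegacyTabsEngine // registered while the pref is false
+ //   );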
+
+ /**
+ * N.B., does not pay attention to the declined list.
+ */
+ getEnabled() {
+ return this.getAll()
+ .filter(engine => engine.enabled)
+ .sort((a, b) => a.syncPriority - b.syncPriority);
+ },
+
+ get enabledEngineNames() {
+ return this.getEnabled().map(e => e.name);
+ },
+
+ persistDeclined() {
+ Svc.Prefs.set("declinedEngines", [...this._declined].join(","));
+ },
+
+ /**
+ * Returns an array.
+ */
+ getDeclined() {
+ return [...this._declined];
+ },
+
+ setDeclined(engines) {
+ this._declined = new Set(engines);
+ this.persistDeclined();
+ },
+
+ isDeclined(engineName) {
+ return this._declined.has(engineName);
+ },
+
+ /**
+ * Accepts a Set or an array.
+ */
+ decline(engines) {
+ for (let e of engines) {
+ this._declined.add(e);
+ }
+ this.persistDeclined();
+ },
+
+ undecline(engines) {
+ for (let e of engines) {
+ this._declined.delete(e);
+ }
+ this.persistDeclined();
+ },
+
+ /**
+ * Register an Engine to the service. Alternatively, give an array of engine
+ * objects to register.
+ *
+ * @param engineObject
+ * Engine object used to get an instance of the engine
+ * Failures during registration are logged rather than thrown.
+ */
+ async register(engineObject) {
+ if (Array.isArray(engineObject)) {
+ for (const e of engineObject) {
+ await this.register(e);
+ }
+ return;
+ }
+
+ try {
+ let engine = new engineObject(this.service);
+ let name = engine.name;
+ if (name in this._engines) {
+ this._log.error("Engine '" + name + "' is already registered!");
+ } else {
+ if (engine.initialize) {
+ await engine.initialize();
+ }
+ this._engines[name] = engine;
+ }
+ } catch (ex) {
+ let name = engineObject || "";
+ name = name.prototype || "";
+ name = name.name || "";
+
+ this._log.error(`Could not initialize engine ${name}`, ex);
+ }
+ },
+
+ async unregister(val) {
+ let name = val;
+ if (val instanceof SyncEngine) {
+ name = val.name;
+ }
+ await this._removeAndFinalize(name);
+ delete this._altEngineInfo[name];
+ },
+
+ // Common code for disabling an engine by name, that doesn't complain if the
+ // engine doesn't exist. Doesn't touch the engine's alternative info (if any
+ // exists).
+ async _removeAndFinalize(name) {
+ if (name in this._engines) {
+ let engine = this._engines[name];
+ delete this._engines[name];
+ await engine.finalize();
+ }
+ },
+
+ async clear() {
+ for (let name in this._engines) {
+ let engine = this._engines[name];
+ delete this._engines[name];
+ await engine.finalize();
+ }
+ this._altEngineInfo = {};
+ },
+};
+
+export function SyncEngine(name, service) {
+ if (!service) {
+ throw new Error("SyncEngine must be associated with a Service instance.");
+ }
+
+ this.Name = name || "Unnamed";
+ // Derive from this.Name so a missing name falls back safely instead of throwing.
+ this.name = this.Name.toLowerCase();
+ this.service = service;
+
+ this._notify = Utils.notify("weave:engine:");
+ this._log = Log.repository.getLogger("Sync.Engine." + this.Name);
+ this._log.manageLevelFromPref(`services.sync.log.logger.engine.${this.name}`);
+
+ this._modified = this.emptyChangeset();
+ this._tracker; // initialize tracker to load previously changed IDs
+ this._log.debug("Engine constructed");
+
+ this._toFetchStorage = new JSONFile({
+ path: Utils.jsonFilePath("toFetch", this.name),
+ dataPostProcessor: json => this._metadataPostProcessor(json),
+ beforeSave: () => this._beforeSaveMetadata(),
+ });
+
+ this._previousFailedStorage = new JSONFile({
+ path: Utils.jsonFilePath("failed", this.name),
+ dataPostProcessor: json => this._metadataPostProcessor(json),
+ beforeSave: () => this._beforeSaveMetadata(),
+ });
+
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_enabled",
+ `services.sync.engine.${this.prefName}`,
+ false
+ );
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_syncID",
+ `services.sync.${this.name}.syncID`,
+ ""
+ );
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_lastSync",
+ `services.sync.${this.name}.lastSync`,
+ "0",
+ null,
+ v => parseFloat(v)
+ );
+ // Async initializations can be made in the initialize() method.
+
+ this.asyncObserver = Async.asyncObserver(this, this._log);
+}
+
+// Enumeration to define approaches to handling bad records.
+// Attached to the constructor to allow use as a kind of static enumeration.
+SyncEngine.kRecoveryStrategy = {
+ ignore: "ignore",
+ retry: "retry",
+ error: "error",
+};
+
+SyncEngine.prototype = {
+ _recordObj: CryptoWrapper,
+ // _storeObj and _trackerObj should be overridden in subclasses
+ _storeObj: Store,
+ _trackerObj: Tracker,
+ version: 1,
+
+ // Local 'constant'.
+ // Signal to the engine that processing further records is pointless.
+ eEngineAbortApplyIncoming: "error.engine.abort.applyincoming",
+
+ // Should we keep syncing if we find a record that cannot be uploaded (ever)?
+ // If this is false, we'll throw, otherwise, we'll ignore the record and
+ // continue. This currently can only happen due to the record being larger
+ // than the record upload limit.
+ allowSkippedRecord: true,
+
+ // Which sortindex to use when retrieving records for this engine.
+ _defaultSort: undefined,
+
+ _hasSyncedThisSession: false,
+
+ _metadataPostProcessor(json) {
+ if (Array.isArray(json)) {
+ // Pre-`JSONFile` storage stored an array, but `JSONFile` defaults to
+ // an object, so we wrap the array for consistency.
+ json = { ids: json };
+ }
+ if (!json.ids) {
+ json.ids = [];
+ }
+ // The set serializes the same way as an array, but offers more efficient
+ // methods of manipulation.
+ json.ids = new SerializableSet(json.ids);
+ return json;
+ },
+
+ async _beforeSaveMetadata() {
+ await ensureDirectory(this._toFetchStorage.path);
+ await ensureDirectory(this._previousFailedStorage.path);
+ },
+
+ // A relative priority to use when computing an order
+ // for engines to be synced. Higher-priority engines
+ // (lower numbers) are synced first.
+ // It is recommended that a unique value be used for each engine,
+ // in order to guarantee a stable sequence.
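+ // For instance, the addons engine later in this patch uses syncPriority: 5;
+ // an engine that should sync earlier would use a smaller number.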
+ syncPriority: 0,
+
+ // How many records to pull in a single sync. This is primarily to avoid very
+ // long first syncs against profiles with many history records.
+ downloadLimit: null,
+
+ // How many records to pull at one time when specifying IDs. This is to avoid
+ // URI length limitations.
+ guidFetchBatchSize: DEFAULT_GUID_FETCH_BATCH_SIZE,
+
+ downloadBatchSize: DEFAULT_DOWNLOAD_BATCH_SIZE,
+
+ async initialize() {
+ await this._toFetchStorage.load();
+ await this._previousFailedStorage.load();
+ Svc.Prefs.observe(`engine.${this.prefName}`, this.asyncObserver);
+ this._log.debug("SyncEngine initialized", this.name);
+ },
+
+ get prefName() {
+ return this.name;
+ },
+
+ get enabled() {
+ return this._enabled;
+ },
+
+ set enabled(val) {
+ if (!!val != this._enabled) {
+ Svc.Prefs.set("engine." + this.prefName, !!val);
+ }
+ },
+
+ get score() {
+ return this._tracker.score;
+ },
+
+ get _store() {
+ let store = new this._storeObj(this.Name, this);
+ this.__defineGetter__("_store", () => store);
+ return store;
+ },
+
+ get _tracker() {
+ let tracker = new this._trackerObj(this.Name, this);
+ this.__defineGetter__("_tracker", () => tracker);
+ return tracker;
+ },
+
+ get storageURL() {
+ return this.service.storageURL;
+ },
+
+ get engineURL() {
+ return this.storageURL + this.name;
+ },
+
+ get cryptoKeysURL() {
+ return this.storageURL + "crypto/keys";
+ },
+
+ get metaURL() {
+ return this.storageURL + "meta/global";
+ },
+
+ startTracking() {
+ this._tracker.start();
+ },
+
+ // Returns a promise
+ stopTracking() {
+ return this._tracker.stop();
+ },
+
+ // Listens for engine enabled state changes, and updates the tracker's state.
+ // This is an async observer because the tracker waits on all its async
+ // observers to finish when it's stopped.
+ async observe(subject, topic, data) {
+ if (
+ topic == "nsPref:changed" &&
+ data == `services.sync.engine.${this.prefName}`
+ ) {
+ await this._tracker.onEngineEnabledChanged(this._enabled);
+ }
+ },
+
+ async sync() {
+ if (!this.enabled) {
+ return false;
+ }
+
+ if (!this._sync) {
+ throw new Error("engine does not implement _sync method");
+ }
+
+ return this._notify("sync", this.name, this._sync)();
+ },
+
+ // Override this method to return a new changeset type.
+ emptyChangeset() {
+ return new Changeset();
+ },
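+
+  // An override sketch: an engine tracking richer change data could return a
+  // Changeset subclass (see the Changeset class at the bottom of this file;
+  // `FooChangeset` is hypothetical):
+  //
+  //   emptyChangeset() {
+  //     return new FooChangeset();
+  //   },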
+
+ /**
+ * Returns the local sync ID for this engine, or `""` if the engine hasn't
+ * synced for the first time. This is exposed for tests.
+ *
+ * @return the current sync ID.
+ */
+ async getSyncID() {
+ return this._syncID;
+ },
+
+ /**
+ * Ensures that the local sync ID for the engine matches the sync ID for the
+ * collection on the server. A mismatch indicates that another client wiped
+ * the collection; we're syncing after a node reassignment, and another
+ * client synced before us; or the store was replaced since the last sync.
+ * In case of a mismatch, we need to reset all local Sync state and start
+ * over as a first sync.
+ *
+ * In most cases, this method should return the new sync ID as-is. However, an
+ * engine may ignore the given ID and assign a different one, if it determines
+ * that the sync ID on the server is out of date. The bookmarks engine uses
+ * this to wipe the server and other clients on the first sync after the user
+ * restores from a backup.
+ *
+ * @param newSyncID
+ * The new sync ID for the collection from `meta/global`.
+ * @return The assigned sync ID. If this doesn't match `newSyncID`, we'll
+ * replace the sync ID in `meta/global` with the assigned ID.
+ */
+ async ensureCurrentSyncID(newSyncID) {
+ let existingSyncID = this._syncID;
+ if (existingSyncID == newSyncID) {
+ return existingSyncID;
+ }
+ this._log.debug("Engine syncIDs: " + [newSyncID, existingSyncID]);
+ Svc.Prefs.set(this.name + ".syncID", newSyncID);
+ Svc.Prefs.set(this.name + ".lastSync", "0");
+ return newSyncID;
+ },
+
+ /**
+ * Resets the local sync ID for the engine, wipes the server, and resets all
+ * local Sync state to start over as a first sync.
+ *
+ * @return the new sync ID.
+ */
+ async resetSyncID() {
+ let newSyncID = await this.resetLocalSyncID();
+ await this.wipeServer();
+ return newSyncID;
+ },
+
+ /**
+ * Resets the local sync ID for the engine, signaling that we're starting over
+ * as a first sync.
+ *
+ * @return the new sync ID.
+ */
+ async resetLocalSyncID() {
+ return this.ensureCurrentSyncID(Utils.makeGUID());
+ },
+
+ /**
+ * Allows overriding scheduler logic -- added to help reduce load on the
+ * kinto server, since our scheduler was never tuned for it.
+ *
+ * Note: Overriding engines must take resyncs into account -- score will not
+ * be cleared.
+ */
+ shouldSkipSync(syncReason) {
+ return false;
+ },
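+
+  // A hypothetical override sketch: throttle scheduled syncs while always
+  // honoring user-initiated ones (`_dueForSync` is not a real helper):
+  //
+  //   shouldSkipSync(syncReason) {
+  //     return syncReason == "schedule" && !this._dueForSync();
+  //   },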
+
+ /*
+ * lastSync is a timestamp in server time.
+ */
+ async getLastSync() {
+ return this._lastSync;
+ },
+ async setLastSync(lastSync) {
+ // Store the value as a string to keep floating point precision
+ Svc.Prefs.set(this.name + ".lastSync", lastSync.toString());
+ },
+ async resetLastSync() {
+ this._log.debug("Resetting " + this.name + " last sync time");
+ await this.setLastSync(0);
+ },
+
+ get hasSyncedThisSession() {
+ return this._hasSyncedThisSession;
+ },
+
+ set hasSyncedThisSession(hasSynced) {
+ this._hasSyncedThisSession = hasSynced;
+ },
+
+ get toFetch() {
+ this._toFetchStorage.ensureDataReady();
+ return this._toFetchStorage.data.ids;
+ },
+
+ set toFetch(ids) {
+ if (ids.constructor.name != "SerializableSet") {
+ throw new Error(
+ "Bug: Attempted to set toFetch to something that isn't a SerializableSet"
+ );
+ }
+ this._toFetchStorage.data = { ids };
+ this._toFetchStorage.saveSoon();
+ },
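+
+  // Note that the setter insists on a SerializableSet, e.g.:
+  //
+  //   engine.toFetch = new SerializableSet(["guidA", "guidB"]); // ok
+  //   engine.toFetch = ["guidA", "guidB"];                      // throws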
+
+ get previousFailed() {
+ this._previousFailedStorage.ensureDataReady();
+ return this._previousFailedStorage.data.ids;
+ },
+
+ set previousFailed(ids) {
+ if (ids.constructor.name != "SerializableSet") {
+ throw new Error(
+ "Bug: Attempted to set previousFailed to something that isn't a SerializableSet"
+ );
+ }
+ this._previousFailedStorage.data = { ids };
+ this._previousFailedStorage.saveSoon();
+ },
+
+ /*
+ * Returns a changeset for this sync. Engine implementations can override this
+ * method to bypass the tracker for some or all changed items.
+ */
+ async getChangedIDs() {
+ return this._tracker.getChangedIDs();
+ },
+
+ // Create a new record using the store and add in metadata.
+ async _createRecord(id) {
+ let record = await this._store.createRecord(id, this.name);
+ record.id = id;
+ record.collection = this.name;
+ return record;
+ },
+
+ // Creates a tombstone Sync record with additional metadata.
+ _createTombstone(id) {
+ let tombstone = new this._recordObj(this.name, id);
+ tombstone.id = id;
+ tombstone.collection = this.name;
+ tombstone.deleted = true;
+ return tombstone;
+ },
+
+ // Any setup that needs to happen at the beginning of each sync.
+ async _syncStartup() {
+ // Determine if we need to wipe on outdated versions
+ let metaGlobal = await this.service.recordManager.get(this.metaURL);
+ let engines = metaGlobal.payload.engines || {};
+ let engineData = engines[this.name] || {};
+
+ // Assume missing versions are 0 and wipe the server
+ if ((engineData.version || 0) < this.version) {
+ this._log.debug("Old engine data: " + [engineData.version, this.version]);
+
+ // Clear the server and reupload everything on bad version or missing
+ // meta. Note that we don't regenerate per-collection keys here.
+ let newSyncID = await this.resetSyncID();
+
+ // Set the newer version and newly generated syncID
+ engineData.version = this.version;
+ engineData.syncID = newSyncID;
+
+ // Put the new data back into meta/global and mark for upload
+ engines[this.name] = engineData;
+ metaGlobal.payload.engines = engines;
+ metaGlobal.changed = true;
+ } else if (engineData.version > this.version) {
+ // Don't sync this engine if the server has newer data
+
+ let error = new Error("New data: " + [engineData.version, this.version]);
+ error.failureCode = VERSION_OUT_OF_DATE;
+ throw error;
+ } else {
+ // Changes to syncID mean we'll need to upload everything
+ let assignedSyncID = await this.ensureCurrentSyncID(engineData.syncID);
+ if (assignedSyncID != engineData.syncID) {
+ engineData.syncID = assignedSyncID;
+ metaGlobal.changed = true;
+ }
+ }
+
+ // Save objects that need to be uploaded in this._modified. As we
+ // successfully upload objects we remove them from this._modified. If an
+ // error occurs or any objects fail to upload, they will remain in
+ // this._modified. At the end of a sync, or after an error, we add all
+ // objects remaining in this._modified to the tracker.
+ let initialChanges = await this.pullChanges();
+ this._modified.replace(initialChanges);
+ // Clear the tracker now. If the sync fails we'll add the ones we failed
+ // to upload back.
+ this._tracker.clearChangedIDs();
+ this._tracker.resetScore();
+
+ // Keep track of what to delete at the end of sync
+ this._delete = {};
+ },
+
+ async pullChanges() {
+ let lastSync = await this.getLastSync();
+ if (lastSync) {
+ return this.pullNewChanges();
+ }
+ this._log.debug("First sync, uploading all items");
+ return this.pullAllChanges();
+ },
+
+ /**
+ * A tiny abstraction to make it easier to test incoming record
+ * application.
+ */
+ itemSource() {
+ return new Collection(this.engineURL, this._recordObj, this.service);
+ },
+
+ /**
+ * Download and apply remote records changed since the last sync. This
+ * happens in three stages.
+ *
+ * In the first stage, we fetch full records for all changed items, newest
+ * first, up to the download limit. The limit lets us make progress for large
+ * collections, where the sync is likely to be interrupted before we
+ * can fetch everything.
+ *
+ * In the second stage, we fetch the IDs of any remaining records changed
+ * since the last sync, add them to our backlog, and fast-forward our last
+ * sync time.
+ *
+ * In the third stage, we fetch and apply records for all backlogged IDs,
+ * as well as any records that failed to apply during the last sync. We
+ * request records for the IDs in chunks, to avoid exceeding URL length
+ * limits, then remove successfully applied records from the backlog, and
+ * record IDs of any records that failed to apply to retry on the next sync.
+ */
+ async _processIncoming() {
+ this._log.trace("Downloading & applying server changes");
+
+ let newitems = this.itemSource();
+ let lastSync = await this.getLastSync();
+
+ newitems.newer = lastSync;
+ newitems.full = true;
+
+ let downloadLimit = Infinity;
+ if (this.downloadLimit) {
+ // Fetch new records up to the download limit. Currently, only the history
+ // engine sets a limit, since the history collection has the highest volume
+ // of changed records between syncs. The other engines fetch all records
+ // changed since the last sync.
+ if (this._defaultSort) {
+ // A download limit with a sort order doesn't make sense: we won't know
+ // which records to backfill.
+ throw new Error("Can't specify download limit with default sort order");
+ }
+ newitems.sort = "newest";
+ downloadLimit = newitems.limit = this.downloadLimit;
+ } else if (this._defaultSort) {
+ // The bookmarks engine fetches records by sort index; other engines leave
+ // the order unspecified. We can remove `_defaultSort` entirely after bug
+ // 1305563: the sort index won't matter because we'll buffer all bookmarks
+ // before applying.
+ newitems.sort = this._defaultSort;
+ }
+
+ // applied => number of items that should be applied.
+ // failed => number of items that failed in this sync.
+ // newFailed => number of items that failed for the first time in this sync.
+ // reconciled => number of items that were reconciled.
+ // failedReasons => {name, count} of reasons a record failed
+ let countTelemetry = new SyncedRecordsTelemetry();
+ let count = countTelemetry.incomingCounts;
+ let recordsToApply = [];
+ let failedInCurrentSync = new SerializableSet();
+
+ let oldestModified = this.lastModified;
+ let downloadedIDs = new Set();
+
+ // Stage 1: Fetch new records from the server, up to the download limit.
+ if (this.lastModified == null || this.lastModified > lastSync) {
+ let { response, records } = await newitems.getBatched(
+ this.downloadBatchSize
+ );
+ if (!response.success) {
+ response.failureCode = ENGINE_DOWNLOAD_FAIL;
+ throw response;
+ }
+
+ await Async.yieldingForEach(records, async record => {
+ downloadedIDs.add(record.id);
+
+ if (record.modified < oldestModified) {
+ oldestModified = record.modified;
+ }
+
+ let { shouldApply, error } = await this._maybeReconcile(record);
+ if (error) {
+ failedInCurrentSync.add(record.id);
+ count.failed++;
+ countTelemetry.addIncomingFailedReason(error.message);
+ return;
+ }
+ if (!shouldApply) {
+ count.reconciled++;
+ return;
+ }
+ recordsToApply.push(record);
+ });
+
+ let failedToApply = await this._applyRecords(
+ recordsToApply,
+ countTelemetry
+ );
+ Utils.setAddAll(failedInCurrentSync, failedToApply);
+
+ // `applied` is a bit of a misnomer: it counts records that *should* be
+ // applied, so it also includes records that we tried to apply and failed.
+ // `recordsToApply.length - failedToApply.length` is the number of records
+ // that we *successfully* applied.
+ count.failed += failedToApply.length;
+ count.applied += recordsToApply.length;
+ }
+
+ // Stage 2: If we reached our download limit, we might still have records
+ // on the server that changed since the last sync. Fetch the IDs for the
+ // remaining records, and add them to the backlog. Note that this stage
+ // only runs for engines that set a download limit.
+ if (downloadedIDs.size == downloadLimit) {
+ let guidColl = this.itemSource();
+
+ guidColl.newer = lastSync;
+ guidColl.older = oldestModified;
+ guidColl.sort = "oldest";
+
+ let guids = await guidColl.get();
+ if (!guids.success) {
+ throw guids;
+ }
+
+ // Filtering out already downloaded IDs here isn't necessary. We only do
+ // that in case the Sync server doesn't support `older` (bug 1316110).
+ let remainingIDs = guids.obj.filter(id => !downloadedIDs.has(id));
+ if (remainingIDs.length) {
+ this.toFetch = Utils.setAddAll(this.toFetch, remainingIDs);
+ }
+ }
+
+ // Fast-forward the lastSync timestamp since we have backlogged the
+ // remaining items.
+ if (lastSync < this.lastModified) {
+ lastSync = this.lastModified;
+ await this.setLastSync(lastSync);
+ }
+
+ // Stage 3: Backfill records from the backlog, and those that failed to
+ // decrypt or apply during the last sync. We only backfill up to the
+ // download limit, to prevent a large backlog for one engine from blocking
+ // the others. We'll keep processing the backlog on subsequent engine syncs.
+ let failedInPreviousSync = this.previousFailed;
+ let idsToBackfill = Array.from(
+ Utils.setAddAll(
+ Utils.subsetOfSize(this.toFetch, downloadLimit),
+ failedInPreviousSync
+ )
+ );
+
+ // Note that we intentionally overwrite the previously failed list here.
+ // Records that fail to decrypt or apply in two consecutive syncs are likely
+ // corrupt; we remove them from the list because retrying and failing on
+ // every subsequent sync just adds noise.
+ this.previousFailed = failedInCurrentSync;
+
+ let backfilledItems = this.itemSource();
+
+ backfilledItems.sort = "newest";
+ backfilledItems.full = true;
+
+ // `getBatched` includes the list of IDs as a query parameter, so we need to fetch
+ // records in chunks to avoid exceeding URI length limits.
+ if (this.guidFetchBatchSize) {
+ for (let ids of lazy.PlacesUtils.chunkArray(
+ idsToBackfill,
+ this.guidFetchBatchSize
+ )) {
+ backfilledItems.ids = ids;
+
+ let { response, records } = await backfilledItems.getBatched(
+ this.downloadBatchSize
+ );
+ if (!response.success) {
+ response.failureCode = ENGINE_DOWNLOAD_FAIL;
+ throw response;
+ }
+
+ let backfilledRecordsToApply = [];
+ let failedInBackfill = [];
+
+ await Async.yieldingForEach(records, async record => {
+ let { shouldApply, error } = await this._maybeReconcile(record);
+ if (error) {
+ failedInBackfill.push(record.id);
+ count.failed++;
+ countTelemetry.addIncomingFailedReason(error.message);
+ return;
+ }
+ if (!shouldApply) {
+ count.reconciled++;
+ return;
+ }
+ backfilledRecordsToApply.push(record);
+ });
+
+ let failedToApply = await this._applyRecords(
+ backfilledRecordsToApply,
+ countTelemetry
+ );
+ failedInBackfill.push(...failedToApply);
+
+ count.failed += failedToApply.length;
+ count.applied += backfilledRecordsToApply.length;
+
+ this.toFetch = Utils.setDeleteAll(this.toFetch, ids);
+ this.previousFailed = Utils.setAddAll(
+ this.previousFailed,
+ failedInBackfill
+ );
+
+ if (lastSync < this.lastModified) {
+ lastSync = this.lastModified;
+ await this.setLastSync(lastSync);
+ }
+ }
+ }
+
+ count.newFailed = 0;
+ for (let item of this.previousFailed) {
+      // Anything that failed in the current sync and also failed in the
+      // previous sync likely has something wrong with the record itself, so
+      // we stop retrying it to prevent syncing corrupted records indefinitely
+ if (failedInPreviousSync.has(item)) {
+ this.previousFailed.delete(item);
+ } else {
+ // otherwise it's a new failure, so count it as such
+ ++count.newFailed;
+ }
+ }
+
+ count.succeeded = Math.max(0, count.applied - count.failed);
+ this._log.info(
+ [
+ "Records:",
+ count.applied,
+ "applied,",
+ count.succeeded,
+ "successfully,",
+ count.failed,
+ "failed to apply,",
+ count.newFailed,
+ "newly failed to apply,",
+ count.reconciled,
+ "reconciled.",
+ ].join(" ")
+ );
+ Observers.notify("weave:engine:sync:applied", count, this.name);
+ },
+
+ async _maybeReconcile(item) {
+ let key = this.service.collectionKeys.keyForCollection(this.name);
+
+ // Grab a later last modified if possible
+ if (this.lastModified == null || item.modified > this.lastModified) {
+ this.lastModified = item.modified;
+ }
+
+ try {
+ try {
+ await item.decrypt(key);
+ } catch (ex) {
+ if (!Utils.isHMACMismatch(ex)) {
+ throw ex;
+ }
+ let strategy = await this.handleHMACMismatch(item, true);
+ if (strategy == SyncEngine.kRecoveryStrategy.retry) {
+ // You only get one retry.
+ try {
+ // Try decrypting again, typically because we've got new keys.
+ this._log.info("Trying decrypt again...");
+ key = this.service.collectionKeys.keyForCollection(this.name);
+ await item.decrypt(key);
+ strategy = null;
+ } catch (ex) {
+ if (!Utils.isHMACMismatch(ex)) {
+ throw ex;
+ }
+ strategy = await this.handleHMACMismatch(item, false);
+ }
+ }
+
+ switch (strategy) {
+ case null:
+ // Retry succeeded! No further handling.
+ break;
+ case SyncEngine.kRecoveryStrategy.retry:
+ this._log.debug("Ignoring second retry suggestion.");
+ // Fall through to error case.
+ case SyncEngine.kRecoveryStrategy.error:
+ this._log.warn("Error decrypting record", ex);
+ return { shouldApply: false, error: ex };
+ case SyncEngine.kRecoveryStrategy.ignore:
+ this._log.debug(
+ "Ignoring record " + item.id + " with bad HMAC: already handled."
+ );
+ return { shouldApply: false, error: null };
+ }
+ }
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.warn("Error decrypting record", ex);
+ return { shouldApply: false, error: ex };
+ }
+
+ if (this._shouldDeleteRemotely(item)) {
+ this._log.trace("Deleting item from server without applying", item);
+ await this._deleteId(item.id);
+ return { shouldApply: false, error: null };
+ }
+
+ let shouldApply;
+ try {
+ shouldApply = await this._reconcile(item);
+ } catch (ex) {
+ if (ex.code == SyncEngine.prototype.eEngineAbortApplyIncoming) {
+ this._log.warn("Reconciliation failed: aborting incoming processing.");
+ throw ex.cause;
+ } else if (!Async.isShutdownException(ex)) {
+ this._log.warn("Failed to reconcile incoming record " + item.id, ex);
+ return { shouldApply: false, error: ex };
+ } else {
+ throw ex;
+ }
+ }
+
+ if (!shouldApply) {
+ this._log.trace("Skipping reconciled incoming item " + item.id);
+ }
+
+ return { shouldApply, error: null };
+ },
+
+ async _applyRecords(records, countTelemetry) {
+ this._tracker.ignoreAll = true;
+ try {
+ let failedIDs = await this._store.applyIncomingBatch(
+ records,
+ countTelemetry
+ );
+ return failedIDs;
+ } catch (ex) {
+ // Catch any error that escapes from applyIncomingBatch. At present
+ // those will all be abort events.
+ this._log.warn("Got exception, aborting processIncoming", ex);
+ throw ex;
+ } finally {
+ this._tracker.ignoreAll = false;
+ }
+ },
+
+ // Indicates whether an incoming item should be deleted from the server at
+ // the end of the sync. Engines can override this method to clean up records
+ // that shouldn't be on the server.
+ _shouldDeleteRemotely(remoteItem) {
+ return false;
+ },
+
+ /**
+ * Find a GUID of an item that is a duplicate of the incoming item but happens
+ * to have a different GUID
+ *
+ * @return GUID of the similar item; falsy otherwise
+ */
+ async _findDupe(item) {
+ // By default, assume there are no duplicate items for the engine
+ },
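+
+  // Engines opt in by overriding this method; a minimal sketch, where
+  // `_lookupByContent` is a hypothetical engine-specific helper:
+  //
+  //   async _findDupe(item) {
+  //     let local = await this._lookupByContent(item);
+  //     return local && local.guid != item.id ? local.guid : null;
+  //   },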
+
+ /**
+ * Called before a remote record is discarded due to failed reconciliation.
+ * Used by bookmark sync to merge folder child orders.
+ */
+ beforeRecordDiscard(localRecord, remoteRecord, remoteIsNewer) {},
+
+ // Called when the server has a record marked as deleted, but locally we've
+ // changed it more recently than the deletion. If we return false, the
+ // record will be deleted locally. If we return true, we'll reupload the
+ // record to the server -- any extra work that's needed as part of this
+ // process should be done at this point (such as mark the record's parent
+ // for reuploading in the case of bookmarks).
+ async _shouldReviveRemotelyDeletedRecord(remoteItem) {
+ return true;
+ },
+
+ async _deleteId(id) {
+ await this._tracker.removeChangedID(id);
+ this._noteDeletedId(id);
+ },
+
+ // Marks an ID for deletion at the end of the sync.
+ _noteDeletedId(id) {
+ if (this._delete.ids == null) {
+ this._delete.ids = [id];
+ } else {
+ this._delete.ids.push(id);
+ }
+ },
+
+ async _switchItemToDupe(localDupeGUID, incomingItem) {
+ // The local, duplicate ID is always deleted on the server.
+ await this._deleteId(localDupeGUID);
+
+ // We unconditionally change the item's ID in case the engine knows of
+ // an item but doesn't expose it through itemExists. If the API
+ // contract were stronger, this could be changed.
+ this._log.debug(
+ "Switching local ID to incoming: " +
+ localDupeGUID +
+ " -> " +
+ incomingItem.id
+ );
+ return this._store.changeItemID(localDupeGUID, incomingItem.id);
+ },
+
+ /**
+ * Reconcile incoming record with local state.
+ *
+ * This function essentially determines whether to apply an incoming record.
+ *
+ * @param item
+ * Record from server to be tested for application.
+ * @return boolean
+ * Truthy if incoming record should be applied. False if not.
+ */
+ async _reconcile(item) {
+ if (this._log.level <= Log.Level.Trace) {
+ this._log.trace("Incoming: " + item);
+ }
+
+ // We start reconciling by collecting a bunch of state. We do this here
+ // because some state may change during the course of this function and we
+ // need to operate on the original values.
+ let existsLocally = await this._store.itemExists(item.id);
+ let locallyModified = this._modified.has(item.id);
+
+ // TODO Handle clock drift better. Tracked in bug 721181.
+ let remoteAge = Resource.serverTime - item.modified;
+ let localAge = locallyModified
+ ? Date.now() / 1000 - this._modified.getModifiedTimestamp(item.id)
+ : null;
+ let remoteIsNewer = remoteAge < localAge;
+
+ this._log.trace(
+ "Reconciling " +
+ item.id +
+ ". exists=" +
+ existsLocally +
+ "; modified=" +
+ locallyModified +
+ "; local age=" +
+ localAge +
+ "; incoming age=" +
+ remoteAge
+ );
+
+ // We handle deletions first so subsequent logic doesn't have to check
+ // deleted flags.
+ if (item.deleted) {
+ // If the item doesn't exist locally, there is nothing for us to do. We
+ // can't check for duplicates because the incoming record has no data
+ // which can be used for duplicate detection.
+ if (!existsLocally) {
+ this._log.trace(
+ "Ignoring incoming item because it was deleted and " +
+ "the item does not exist locally."
+ );
+ return false;
+ }
+
+ // We decide whether to process the deletion by comparing the record
+ // ages. If the item is not modified locally, the remote side wins and
+ // the deletion is processed. If it is modified locally, we take the
+ // newer record.
+ if (!locallyModified) {
+ this._log.trace(
+ "Applying incoming delete because the local item " +
+ "exists and isn't modified."
+ );
+ return true;
+ }
+ this._log.trace("Incoming record is deleted but we had local changes.");
+
+ if (remoteIsNewer) {
+ this._log.trace("Remote record is newer -- deleting local record.");
+ return true;
+ }
+ // If the local record is newer, we defer to individual engines for
+ // how to handle this. By default, we revive the record.
+ let willRevive = await this._shouldReviveRemotelyDeletedRecord(item);
+ this._log.trace("Local record is newer -- reviving? " + willRevive);
+
+ return !willRevive;
+ }
+
+ // At this point the incoming record is not for a deletion and must have
+ // data. If the incoming record does not exist locally, we check for a local
+ // duplicate existing under a different ID. The default implementation of
+ // _findDupe() is empty, so engines have to opt in to this functionality.
+ //
+ // If we find a duplicate, we change the local ID to the incoming ID and we
+ // refresh the metadata collected above. See bug 710448 for the history
+ // of this logic.
+ if (!existsLocally) {
+ let localDupeGUID = await this._findDupe(item);
+ if (localDupeGUID) {
+ this._log.trace(
+ "Local item " +
+ localDupeGUID +
+ " is a duplicate for " +
+ "incoming item " +
+ item.id
+ );
+
+ // The current API contract does not mandate that the ID returned by
+ // _findDupe() actually exists. Therefore, we have to perform this
+ // check.
+ existsLocally = await this._store.itemExists(localDupeGUID);
+
+ // If the local item was modified, we carry its metadata forward so
+ // appropriate reconciling can be performed.
+ if (this._modified.has(localDupeGUID)) {
+ locallyModified = true;
+ localAge =
+ this._tracker._now() -
+ this._modified.getModifiedTimestamp(localDupeGUID);
+ remoteIsNewer = remoteAge < localAge;
+
+ this._modified.changeID(localDupeGUID, item.id);
+ } else {
+ locallyModified = false;
+ localAge = null;
+ }
+
+ // Tell the engine to do whatever it needs to switch the items.
+ await this._switchItemToDupe(localDupeGUID, item);
+
+ this._log.debug(
+ "Local item after duplication: age=" +
+ localAge +
+ "; modified=" +
+ locallyModified +
+ "; exists=" +
+ existsLocally
+ );
+ } else {
+ this._log.trace("No duplicate found for incoming item: " + item.id);
+ }
+ }
+
+ // At this point we've performed duplicate detection. But, nothing here
+ // should depend on duplicate detection as the above should have updated
+ // state seamlessly.
+
+ if (!existsLocally) {
+ // If the item doesn't exist locally and we have no local modifications
+ // to the item (implying that it was not deleted), always apply the remote
+ // item.
+ if (!locallyModified) {
+ this._log.trace(
+ "Applying incoming because local item does not exist " +
+ "and was not deleted."
+ );
+ return true;
+ }
+
+ // If the item was modified locally but isn't present, it must have
+ // been deleted. If the incoming record is younger, we restore from
+ // that record.
+ if (remoteIsNewer) {
+ this._log.trace(
+ "Applying incoming because local item was deleted " +
+ "before the incoming item was changed."
+ );
+ this._modified.delete(item.id);
+ return true;
+ }
+
+ this._log.trace(
+ "Ignoring incoming item because the local item's " +
+ "deletion is newer."
+ );
+ return false;
+ }
+
+ // If the remote and local records are the same, there is nothing to be
+ // done, so we don't do anything. In the ideal world, this logic wouldn't
+ // be here and the engine would take a record and apply it. The reason we
+ // want to defer this logic is because it would avoid a redundant and
+ // possibly expensive dip into the storage layer to query item state.
+ // This should get addressed in the async rewrite, so we ignore it for now.
+ let localRecord = await this._createRecord(item.id);
+ let recordsEqual = Utils.deepEquals(item.cleartext, localRecord.cleartext);
+
+ // If the records are the same, we don't need to do anything. This does
+ // potentially throw away a local modification time. But, if the records
+ // are the same, does it matter?
+ if (recordsEqual) {
+ this._log.trace(
+ "Ignoring incoming item because the local item is identical."
+ );
+
+ this._modified.delete(item.id);
+ return false;
+ }
+
+ // At this point the records are different.
+
+ // If we have no local modifications, always take the server record.
+ if (!locallyModified) {
+ this._log.trace("Applying incoming record because no local conflicts.");
+ return true;
+ }
+
+ // At this point, records are different and the local record is modified.
+ // We resolve conflicts by record age, where the newest one wins. This does
+ // result in data loss and should be handled by giving the engine an
+ // opportunity to merge the records. Bug 720592 tracks this feature.
+ this._log.warn(
+ "DATA LOSS: Both local and remote changes to record: " + item.id
+ );
+ if (!remoteIsNewer) {
+ this.beforeRecordDiscard(localRecord, item, remoteIsNewer);
+ }
+ return remoteIsNewer;
+ },
+
+ // Upload outgoing records.
+ async _uploadOutgoing() {
+ this._log.trace("Uploading local changes to server.");
+
+ // collection we'll upload
+ let up = new Collection(this.engineURL, null, this.service);
+ let modifiedIDs = new Set(this._modified.ids());
+ let countTelemetry = new SyncedRecordsTelemetry();
+ let counts = countTelemetry.outgoingCounts;
+ this._log.info(`Uploading ${modifiedIDs.size} outgoing records`);
+ if (modifiedIDs.size) {
+ counts.sent = modifiedIDs.size;
+
+ let failed = [];
+ let successful = [];
+ let lastSync = await this.getLastSync();
+ let handleResponse = async (postQueue, resp, batchOngoing) => {
+ // Note: We don't want to update this.lastSync or this._modified until
+ // the batch is complete; however, we want to remember success/failure
+ // indicators for when that happens.
+ if (!resp.success) {
+ this._log.debug(`Uploading records failed: ${resp.status}`);
+ resp.failureCode =
+ resp.status == 412 ? ENGINE_BATCH_INTERRUPTED : ENGINE_UPLOAD_FAIL;
+ throw resp;
+ }
+
+ // Update server timestamp from the upload.
+ failed = failed.concat(Object.keys(resp.obj.failed));
+ successful = successful.concat(resp.obj.success);
+
+ if (batchOngoing) {
+ // Nothing to do yet
+ return;
+ }
+
+ if (failed.length && this._log.level <= Log.Level.Debug) {
+ this._log.debug(
+ "Records that will be uploaded again because " +
+ "the server couldn't store them: " +
+ failed.join(", ")
+ );
+ }
+
+ counts.failed += failed.length;
+ Object.values(failed).forEach(message => {
+ countTelemetry.addOutgoingFailedReason(message);
+ });
+
+ for (let id of successful) {
+ this._modified.delete(id);
+ }
+
+ await this._onRecordsWritten(
+ successful,
+ failed,
+ postQueue.lastModified
+ );
+
+ // Advance lastSync since we've finished the batch.
+ if (postQueue.lastModified > lastSync) {
+ lastSync = postQueue.lastModified;
+ await this.setLastSync(lastSync);
+ }
+
+ // clear for next batch
+ failed.length = 0;
+ successful.length = 0;
+ };
+
+ let postQueue = up.newPostQueue(this._log, lastSync, handleResponse);
+
+ for (let id of modifiedIDs) {
+ let out;
+ let ok = false;
+ try {
+ out = await this._createRecord(id);
+ if (this._log.level <= Log.Level.Trace) {
+ this._log.trace("Outgoing: " + out);
+ }
+ await out.encrypt(
+ this.service.collectionKeys.keyForCollection(this.name)
+ );
+ ok = true;
+ } catch (ex) {
+ this._log.warn("Error creating record", ex);
+ ++counts.failed;
+ countTelemetry.addOutgoingFailedReason(ex.message);
+ if (Async.isShutdownException(ex) || !this.allowSkippedRecord) {
+ if (!this.allowSkippedRecord) {
+ // Don't bother for shutdown errors
+ Observers.notify("weave:engine:sync:uploaded", counts, this.name);
+ }
+ throw ex;
+ }
+ }
+ if (ok) {
+ let { enqueued, error } = await postQueue.enqueue(out);
+ if (!enqueued) {
+ ++counts.failed;
+ countTelemetry.addOutgoingFailedReason(error.message);
+ if (!this.allowSkippedRecord) {
+ Observers.notify("weave:engine:sync:uploaded", counts, this.name);
+ this._log.warn(
+ `Failed to enqueue record "${id}" (aborting)`,
+ error
+ );
+ throw error;
+ }
+ this._modified.delete(id);
+ this._log.warn(
+ `Failed to enqueue record "${id}" (skipping)`,
+ error
+ );
+ }
+ }
+ await Async.promiseYield();
+ }
+ await postQueue.flush(true);
+ }
+
+ if (counts.sent || counts.failed) {
+ Observers.notify("weave:engine:sync:uploaded", counts, this.name);
+ }
+ },
+
+ async _onRecordsWritten(succeeded, failed, serverModifiedTime) {
+ // Implement this method to take specific actions against successfully
+ // uploaded records and failed records.
+ },
+
+ // Any cleanup necessary.
+ // Save the current snapshot so as to calculate changes at next sync
+ async _syncFinish() {
+ this._log.trace("Finishing up sync");
+
+ let doDelete = async (key, val) => {
+ let coll = new Collection(this.engineURL, this._recordObj, this.service);
+ coll[key] = val;
+ await coll.delete();
+ };
+
+ for (let [key, val] of Object.entries(this._delete)) {
+ // Remove the key for future uses
+ delete this._delete[key];
+
+ this._log.trace("doing post-sync deletions", { key, val });
+ // Send a simple delete for the property
+ if (key != "ids" || val.length <= 100) {
+ await doDelete(key, val);
+ } else {
+ // For many ids, split into chunks of at most 100
+ while (val.length) {
+ await doDelete(key, val.slice(0, 100));
+ val = val.slice(100);
+ }
+ }
+ }
+ this.hasSyncedThisSession = true;
+ await this._tracker.asyncObserver.promiseObserversComplete();
+ },
+
+ async _syncCleanup() {
+ try {
+ // Mark failed WBOs as changed again so they are reuploaded next time.
+ await this.trackRemainingChanges();
+ } finally {
+ this._modified.clear();
+ }
+ },
+
+ async _sync() {
+ try {
+ Async.checkAppReady();
+ await this._syncStartup();
+ Async.checkAppReady();
+ Observers.notify("weave:engine:sync:status", "process-incoming");
+ await this._processIncoming();
+ Async.checkAppReady();
+ Observers.notify("weave:engine:sync:status", "upload-outgoing");
+ try {
+ await this._uploadOutgoing();
+ Async.checkAppReady();
+ await this._syncFinish();
+ } catch (ex) {
+ if (!ex.status || ex.status != 412) {
+ throw ex;
+ }
+ // a 412 posting just means another client raced - but we don't want
+ // to treat that as a sync error - the next sync is almost certain
+ // to work.
+ this._log.warn("412 error during sync - will retry.");
+ }
+ } finally {
+ await this._syncCleanup();
+ }
+ },
+
+ async canDecrypt() {
+ // Report failure even if there's nothing to decrypt
+ let canDecrypt = false;
+
+ // Fetch the most recently uploaded record and try to decrypt it
+ let test = new Collection(this.engineURL, this._recordObj, this.service);
+ test.limit = 1;
+ test.sort = "newest";
+ test.full = true;
+
+ let key = this.service.collectionKeys.keyForCollection(this.name);
+
+ // Any failure fetching/decrypting will just result in false
+ try {
+ this._log.trace("Trying to decrypt a record from the server..");
+ let json = (await test.get()).obj[0];
+ let record = new this._recordObj();
+ record.deserialize(json);
+ await record.decrypt(key);
+ canDecrypt = true;
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.debug("Failed test decrypt", ex);
+ }
+
+ return canDecrypt;
+ },
+
+ /**
+ * Deletes the collection for this engine on the server, and removes all local
+ * Sync metadata for this engine. This does *not* remove any existing data on
+ * other clients. This is called when we reset the sync ID.
+ */
+ async wipeServer() {
+ await this._deleteServerCollection();
+ await this._resetClient();
+ },
+
+ /**
+ * Deletes the collection for this engine on the server, without removing
+ * any local Sync metadata or user data. Deleting the collection will not
+ * remove any user data on other clients, but will force other clients to
+ * start over as a first sync.
+ */
+ async _deleteServerCollection() {
+ let response = await this.service.resource(this.engineURL).delete();
+ if (response.status != 200 && response.status != 404) {
+ throw response;
+ }
+ },
+
+ async removeClientData() {
+ // Implement this method in engines that store client specific data
+ // on the server.
+ },
+
+ /*
+ * Decide on (and partially effect) an error-handling strategy.
+ *
+ * Asks the Service to respond to an HMAC error, which might result in keys
+ * being downloaded. That call returns true if it took an action that might
+ * allow a retry to succeed.
+ *
+ * If `mayRetry` is truthy, and the Service suggests a retry,
+ * handleHMACMismatch returns kRecoveryStrategy.retry. Otherwise, it returns
+ * kRecoveryStrategy.error.
+ *
+ * Subclasses of SyncEngine can override this method to allow for different
+ * behavior -- e.g., to delete and ignore erroneous entries.
+ *
+ * All return values will be part of the kRecoveryStrategy enumeration.
+ */
+ async handleHMACMismatch(item, mayRetry) {
+ // By default we either try again, or bail out noisily.
+ return (await this.service.handleHMACEvent()) && mayRetry
+ ? SyncEngine.kRecoveryStrategy.retry
+ : SyncEngine.kRecoveryStrategy.error;
+ },
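+
+  // A subclass sketch (not the base-class behavior): scrub undecryptable
+  // records from the server and keep syncing rather than failing.
+  //
+  //   async handleHMACMismatch(item, mayRetry) {
+  //     await this._deleteId(item.id); // queue the bad record for deletion
+  //     return SyncEngine.kRecoveryStrategy.ignore;
+  //   },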
+
+ /**
+ * Returns a changeset containing all items in the store. The default
+ * implementation returns a changeset with timestamps from long ago, to
+ * ensure we always use the remote version if one exists.
+ *
+ * This function is only called for the first sync. Subsequent syncs call
+ * `pullNewChanges`.
+ *
+ * @return A `Changeset` object.
+ */
+ async pullAllChanges() {
+ let changes = {};
+ let ids = await this._store.getAllIDs();
+ for (let id in ids) {
+ changes[id] = 0;
+ }
+ return changes;
+ },
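+
+  // The returned object maps IDs to "modified" timestamps; 0 ensures any
+  // server copy is treated as newer, e.g.:
+  //
+  //   { "guidA": 0, "guidB": 0 }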
+
+ /*
+ * Returns a changeset containing entries for all currently tracked items.
+ * The default implementation returns a changeset with timestamps indicating
+ * when the item was added to the tracker.
+ *
+ * @return A `Changeset` object.
+ */
+ async pullNewChanges() {
+ await this._tracker.asyncObserver.promiseObserversComplete();
+ return this.getChangedIDs();
+ },
+
+ /**
+ * Adds all remaining changeset entries back to the tracker, typically for
+ * items that failed to upload. This method is called at the end of each sync.
+ *
+ */
+ async trackRemainingChanges() {
+ for (let [id, change] of this._modified.entries()) {
+ await this._tracker.addChangedID(id, change);
+ }
+ },
+
+ /**
+ * Removes all local Sync metadata for this engine, but keeps all existing
+ * local user data.
+ */
+ async resetClient() {
+ return this._notify("reset-client", this.name, this._resetClient)();
+ },
+
+ async _resetClient() {
+ await this.resetLastSync();
+ this.hasSyncedThisSession = false;
+ this.previousFailed = new SerializableSet();
+ this.toFetch = new SerializableSet();
+ },
+
+ /**
+ * Removes all local Sync metadata and user data for this engine.
+ */
+ async wipeClient() {
+ return this._notify("wipe-client", this.name, this._wipeClient)();
+ },
+
+ async _wipeClient() {
+ await this.resetClient();
+ this._log.debug("Deleting all local data");
+ this._tracker.ignoreAll = true;
+ await this._store.wipe();
+ this._tracker.ignoreAll = false;
+ this._tracker.clearChangedIDs();
+ },
+
+ /**
+ * If one exists, initialize and return a validator for this engine (which
+ * must have a `validate(engine)` method that returns a promise to an object
+ * with a getSummary method). Otherwise return null.
+ */
+ getValidator() {
+ return null;
+ },
+
+ async finalize() {
+ Svc.Prefs.ignore(`engine.${this.prefName}`, this.asyncObserver);
+ await this.asyncObserver.promiseObserversComplete();
+ await this._tracker.finalize();
+ await this._toFetchStorage.finalize();
+ await this._previousFailedStorage.finalize();
+ },
+
+ // Returns a new watchdog. Exposed for tests.
+ _newWatchdog() {
+ return Async.watchdog();
+ },
+};
+
+/**
+ * A changeset is created for each sync in `Engine::get{Changed, All}IDs`,
+ * and stores opaque change data for tracked IDs. The default implementation
+ * only records timestamps, though engines can extend this to store additional
+ * data for each entry.
+ */
+export class Changeset {
+ // Creates an empty changeset.
+ constructor() {
+ this.changes = {};
+ }
+
+ // Returns the last modified time, in seconds, for an entry in the changeset.
+ // `id` is guaranteed to be in the set.
+ getModifiedTimestamp(id) {
+ return this.changes[id];
+ }
+
+ // Adds a change for a tracked ID to the changeset.
+ set(id, change) {
+ this.changes[id] = change;
+ }
+
+ // Adds multiple entries to the changeset, preserving existing entries.
+ insert(changes) {
+ Object.assign(this.changes, changes);
+ }
+
+ // Overwrites the existing set of tracked changes with new entries.
+ replace(changes) {
+ this.changes = changes;
+ }
+
+ // Indicates whether an entry is in the changeset.
+ has(id) {
+ return id in this.changes;
+ }
+
+ // Deletes an entry from the changeset. Used to clean up entries for
+ // reconciled and successfully uploaded records.
+ delete(id) {
+ delete this.changes[id];
+ }
+
+ // Changes the ID of an entry in the changeset. Used when reconciling
+ // duplicates that have local changes.
+ changeID(oldID, newID) {
+ this.changes[newID] = this.changes[oldID];
+ delete this.changes[oldID];
+ }
+
+ // Returns an array of all tracked IDs in this changeset.
+ ids() {
+ return Object.keys(this.changes);
+ }
+
+ // Returns an array of `[id, change]` tuples. Used to repopulate the tracker
+ // with entries for failed uploads at the end of a sync.
+ entries() {
+ return Object.entries(this.changes);
+ }
+
+ // Returns the number of entries in this changeset.
+ count() {
+ return this.ids().length;
+ }
+
+ // Clears the changeset.
+ clear() {
+ this.changes = {};
+ }
+}
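+
+// A quick illustration of how an engine drives a Changeset over a sync
+// (names as defined above; the GUID is arbitrary):
+//
+//   let changes = new Changeset();
+//   changes.replace(await engine.getChangedIDs()); // seed from the tracker
+//   changes.has("guidA");                          // => true if tracked
+//   changes.delete("guidA");                       // e.g. after a successful upload
+//   changes.entries();                             // leftovers to re-track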
diff --git a/services/sync/modules/engines/addons.sys.mjs b/services/sync/modules/engines/addons.sys.mjs
new file mode 100644
index 0000000000..d1b766a957
--- /dev/null
+++ b/services/sync/modules/engines/addons.sys.mjs
@@ -0,0 +1,820 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file defines the add-on sync functionality.
+ *
+ * There are currently a number of known limitations:
+ * - We only sync XPI extensions and themes available from addons.mozilla.org.
+ * We hope to expand support for other add-ons eventually.
+ * - We only attempt syncing of add-ons between applications of the same type.
+ * This means add-ons will not synchronize between Firefox desktop and
+ * Firefox mobile, for example. This is because of significant add-on
+ * incompatibility between application types.
+ *
+ * Add-on records exist for each known {add-on, app-id} pair in the Sync client
+ * set. Each record has a randomly chosen GUID. The records then contain
+ * basic metadata about the add-on.
+ *
+ * We currently synchronize:
+ *
+ * - Installations
+ * - Uninstallations
+ * - User enabling and disabling
+ *
+ * Synchronization is influenced by the following preferences:
+ *
+ * - services.sync.addons.ignoreUserEnabledChanges
+ * - services.sync.addons.trustedSourceHostnames
+ *
+ * and also influenced by whether addons have repository caching enabled and
+ * whether they allow installation of addons from insecure origins (both of
+ * which are themselves influenced by the "extensions." pref branch)
+ *
+ * See the documentation in all.js for the behavior of these prefs.
+ */
+
+import { Preferences } from "resource://gre/modules/Preferences.sys.mjs";
+
+import { AddonUtils } from "resource://services-sync/addonutils.sys.mjs";
+import { AddonsReconciler } from "resource://services-sync/addonsreconciler.sys.mjs";
+import {
+ Store,
+ SyncEngine,
+ LegacyTracker,
+} from "resource://services-sync/engines.sys.mjs";
+import { CryptoWrapper } from "resource://services-sync/record.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import { SCORE_INCREMENT_XLARGE } from "resource://services-sync/constants.sys.mjs";
+import { CollectionValidator } from "resource://services-sync/collection_validator.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ AddonManager: "resource://gre/modules/AddonManager.sys.mjs",
+ AddonRepository: "resource://gre/modules/addons/AddonRepository.sys.mjs",
+});
+
+// 7 days in milliseconds.
+const PRUNE_ADDON_CHANGES_THRESHOLD = 60 * 60 * 24 * 7 * 1000;
+
+/**
+ * AddonRecord represents the state of an add-on in an application.
+ *
+ * Each add-on has its own record for each application ID it is installed
+ * on.
+ *
+ * The ID of add-on records is a randomly-generated GUID. It is random instead
+ * of deterministic so the URIs of the records cannot be guessed and so
+ * compromised server credentials won't result in disclosure of the specific
+ * add-ons present in a Sync account.
+ *
+ * The record contains the following fields:
+ *
+ * addonID
+ * ID of the add-on. This correlates to the "id" property on an Addon type.
+ *
+ * applicationID
+ * The application ID this record is associated with.
+ *
+ * enabled
+ * Boolean stating whether add-on is enabled or disabled by the user.
+ *
+ * source
+ * String indicating where an add-on is from. Currently, we only support
+ * the value "amo" which indicates that the add-on came from the official
+ * add-ons repository, addons.mozilla.org. In the future, we may support
+ * installing add-ons from other sources. This provides a future-compatible
+ * mechanism for clients to only apply records they know how to handle.
+ */
+function AddonRecord(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+AddonRecord.prototype = {
+ _logName: "Record.Addon",
+};
+Object.setPrototypeOf(AddonRecord.prototype, CryptoWrapper.prototype);
+
+Utils.deferGetSet(AddonRecord, "cleartext", [
+ "addonID",
+ "applicationID",
+ "enabled",
+ "source",
+]);
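+
+// For reference, the cleartext of a record would look roughly like this
+// (a hypothetical example; the IDs are illustrative):
+//
+//   {
+//     addonID: "uBlock0@raymondhill.net",
+//     applicationID: "{ec8030f7-c20a-464f-9b0e-13a3a9e97384}",
+//     enabled: true,
+//     source: "amo",
+//   }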
+
+/**
+ * The AddonsEngine handles synchronization of add-ons between clients.
+ *
+ * The engine maintains an instance of an AddonsReconciler, which is the entity
+ * maintaining state for add-ons. It provides the history and tracking APIs
+ * that AddonManager doesn't.
+ *
+ * The engine instance overrides a handful of functions on the base class. The
+ * rationale for each is documented by that function.
+ */
+export function AddonsEngine(service) {
+ SyncEngine.call(this, "Addons", service);
+
+ this._reconciler = new AddonsReconciler(this._tracker.asyncObserver);
+}
+
+AddonsEngine.prototype = {
+ _storeObj: AddonsStore,
+ _trackerObj: AddonsTracker,
+ _recordObj: AddonRecord,
+ version: 1,
+
+ syncPriority: 5,
+
+ _reconciler: null,
+
+ async initialize() {
+ await SyncEngine.prototype.initialize.call(this);
+ await this._reconciler.ensureStateLoaded();
+ },
+
+ /**
+ * Override parent method to find add-ons by their public ID, not Sync GUID.
+ */
+ async _findDupe(item) {
+ let id = item.addonID;
+
+ // The reconciler should have been updated at the top of the sync, so we
+ // can assume it is up to date when this function is called.
+ let addons = this._reconciler.addons;
+ if (!(id in addons)) {
+ return null;
+ }
+
+ let addon = addons[id];
+ if (addon.guid != item.id) {
+ return addon.guid;
+ }
+
+ return null;
+ },
+
+ /**
+ * Override getChangedIDs to pull in tracker changes plus changes from the
+ * reconciler log.
+ */
+ async getChangedIDs() {
+ let changes = {};
+ const changedIDs = await this._tracker.getChangedIDs();
+ for (let [id, modified] of Object.entries(changedIDs)) {
+ changes[id] = modified;
+ }
+
+ let lastSync = await this.getLastSync();
+ let lastSyncDate = new Date(lastSync * 1000);
+
+ // The reconciler should have been refreshed at the beginning of a sync and
+ // we assume this function is only called from within a sync.
+ let reconcilerChanges = this._reconciler.getChangesSinceDate(lastSyncDate);
+ let addons = this._reconciler.addons;
+ for (let change of reconcilerChanges) {
+ let changeTime = change[0];
+ let id = change[2];
+
+ if (!(id in addons)) {
+ continue;
+ }
+
+ // Keep newest modified time.
+ if (id in changes && changeTime < changes[id]) {
+ continue;
+ }
+
+ if (!(await this.isAddonSyncable(addons[id]))) {
+ continue;
+ }
+
+ this._log.debug("Adding changed add-on from changes log: " + id);
+ let addon = addons[id];
+ changes[addon.guid] = changeTime.getTime() / 1000;
+ }
+
+ return changes;
+ },
+
+ /**
+ * Override start of sync function to refresh reconciler.
+ *
+ * Many functions in this class assume the reconciler is refreshed at the
+ * top of a sync. If this ever changes, those functions should be revisited.
+ *
+ * Technically speaking, we don't need to refresh the reconciler on every
+ * sync since it is installed as an AddonManager listener. However, add-ons
+ * are complicated and we force a full refresh, just in case the listeners
+ * missed something.
+ */
+ async _syncStartup() {
+ // We refresh state before calling parent because syncStartup in the parent
+ // looks for changed IDs, which is dependent on add-on state being up to
+ // date.
+ await this._refreshReconcilerState();
+ return SyncEngine.prototype._syncStartup.call(this);
+ },
+
+ /**
+ * Override end of sync to perform a little housekeeping on the reconciler.
+ *
+ * We prune changes to prevent the reconciler state from growing without
+ * bound. Even if it grows unbounded, there would have to be many add-on
+ * changes (thousands) for it to slow things down significantly. This is
+ * highly unlikely to occur. Still, we exercise defense just in case.
+ */
+ async _syncCleanup() {
+ let lastSync = await this.getLastSync();
+ let ms = 1000 * lastSync - PRUNE_ADDON_CHANGES_THRESHOLD;
+ this._reconciler.pruneChangesBeforeDate(new Date(ms));
+ return SyncEngine.prototype._syncCleanup.call(this);
+ },
+
+ /**
+ * Helper function to ensure reconciler is up to date.
+ *
+ * This will load the reconciler's state from the file
+ * system (if needed) and refresh the state of the reconciler.
+ */
+ async _refreshReconcilerState() {
+ this._log.debug("Refreshing reconciler state");
+ return this._reconciler.refreshGlobalState();
+ },
+
+ // Returns a promise
+ isAddonSyncable(addon, ignoreRepoCheck) {
+ return this._store.isAddonSyncable(addon, ignoreRepoCheck);
+ },
+};
+Object.setPrototypeOf(AddonsEngine.prototype, SyncEngine.prototype);
+
+/**
+ * This is the primary interface between Sync and the Addons Manager.
+ *
+ * In addition to the core store APIs, we provide convenience functions to wrap
+ * Add-on Manager APIs with Sync-specific semantics.
+ */
+function AddonsStore(name, engine) {
+ Store.call(this, name, engine);
+}
+AddonsStore.prototype = {
+ // Define the add-on types (.type) that we support.
+ _syncableTypes: ["extension", "theme"],
+
+ _extensionsPrefs: new Preferences("extensions."),
+
+ get reconciler() {
+ return this.engine._reconciler;
+ },
+
+ /**
+ * Override applyIncoming to filter out records we can't handle.
+ */
+ async applyIncoming(record) {
+ // The fields we look at aren't present when the record is deleted.
+ if (!record.deleted) {
+ // Ignore records not belonging to our application ID because that is the
+ // current policy.
+ if (record.applicationID != Services.appinfo.ID) {
+ this._log.info(
+ "Ignoring incoming record from other App ID: " + record.id
+ );
+ return;
+ }
+
+ // Ignore records that aren't from the official add-on repository, as that
+ // is our current policy.
+ if (record.source != "amo") {
+ this._log.info(
+ "Ignoring unknown add-on source (" +
+ record.source +
+ ")" +
+ " for " +
+ record.id
+ );
+ return;
+ }
+ }
+
+ // Ignore incoming records for which an existing non-syncable addon
+ // exists. Note that we do not insist that the addon manager already have
+ // metadata for this addon - it's possible our reconciler previously saw the
+ // addon but the addon-manager cache no longer has it - which is fine for a
+ // new incoming addon.
+ // (Note that most other cases where the addon-manager cache is invalid
+ // don't get this treatment because that cache self-repairs after some
+ // time - but it only re-populates addons which are currently installed.)
+ let existingMeta = this.reconciler.addons[record.addonID];
+ if (
+ existingMeta &&
+ !(await this.isAddonSyncable(existingMeta, /* ignoreRepoCheck */ true))
+ ) {
+ this._log.info(
+ "Ignoring incoming record for an existing but non-syncable addon",
+ record.addonID
+ );
+ return;
+ }
+
+ await Store.prototype.applyIncoming.call(this, record);
+ },
+
+ /**
+ * Provides core Store API to create/install an add-on from a record.
+ */
+ async create(record) {
+ // This will throw if there was an error. This will get caught by the sync
+ // engine and the record will try to be applied later.
+ const results = await AddonUtils.installAddons([
+ {
+ id: record.addonID,
+ syncGUID: record.id,
+ enabled: record.enabled,
+ requireSecureURI: this._extensionsPrefs.get(
+ "install.requireSecureOrigin",
+ true
+ ),
+ },
+ ]);
+
+ if (results.skipped.includes(record.addonID)) {
+ this._log.info("Add-on skipped: " + record.addonID);
+ // Just early-return for skipped addons - we don't want to arrange to
+ // try again next time, because the condition that caused us to skip
+ // will remain true for this addon forever.
+ return;
+ }
+
+ let addon;
+ for (let a of results.addons) {
+ if (a.id == record.addonID) {
+ addon = a;
+ break;
+ }
+ }
+
+ // This should never happen, but is present as a fail-safe.
+ if (!addon) {
+ throw new Error("Add-on not found after install: " + record.addonID);
+ }
+
+ this._log.info("Add-on installed: " + record.addonID);
+ },
+
+ /**
+ * Provides core Store API to remove/uninstall an add-on from a record.
+ */
+ async remove(record) {
+ // If this is called, the payload is empty, so we have to find by GUID.
+ let addon = await this.getAddonByGUID(record.id);
+ if (!addon) {
+ // We don't throw because if the add-on could not be found then we assume
+ // it has already been uninstalled and there is nothing for this function
+ // to do.
+ return;
+ }
+
+ this._log.info("Uninstalling add-on: " + addon.id);
+ await AddonUtils.uninstallAddon(addon);
+ },
+
+ /**
+ * Provides core Store API to update an add-on from a record.
+ */
+ async update(record) {
+ let addon = await this.getAddonByID(record.addonID);
+
+ // update() is called if !this.itemExists. And, since itemExists consults
+ // the reconciler only, we need to take care of some corner cases.
+ //
+ // First, the reconciler could know about an add-on that was uninstalled
+ // and no longer present in the add-ons manager.
+ if (!addon) {
+ await this.create(record);
+ return;
+ }
+
+ // It's also possible that the add-on is non-restartless and has pending
+ // install/uninstall activity.
+ //
+ // We wouldn't get here if the incoming record was for a deletion. So,
+ // check for pending uninstall and cancel if necessary.
+ if (addon.pendingOperations & lazy.AddonManager.PENDING_UNINSTALL) {
+ addon.cancelUninstall();
+
+ // We continue with processing because there could be state or ID change.
+ }
+
+ await this.updateUserDisabled(addon, !record.enabled);
+ },
+
+ /**
+ * Provide core Store API to determine if a record exists.
+ */
+ async itemExists(guid) {
+ let addon = this.reconciler.getAddonStateFromSyncGUID(guid);
+
+ return !!addon;
+ },
+
+ /**
+ * Create an add-on record from its GUID.
+ *
+ * @param guid
+ * Add-on GUID (from extensions DB)
+ * @param collection
+ * Collection to add record to.
+ *
+ * @return AddonRecord instance
+ */
+ async createRecord(guid, collection) {
+ let record = new AddonRecord(collection, guid);
+ record.applicationID = Services.appinfo.ID;
+
+ let addon = this.reconciler.getAddonStateFromSyncGUID(guid);
+
+ // If we don't know about this GUID or if it has been uninstalled, we mark
+ // the record as deleted.
+ if (!addon || !addon.installed) {
+ record.deleted = true;
+ return record;
+ }
+
+ record.modified = addon.modified.getTime() / 1000;
+
+ record.addonID = addon.id;
+ record.enabled = addon.enabled;
+
+ // This needs to be dynamic when add-ons don't come from AddonRepository.
+ record.source = "amo";
+
+ return record;
+ },
+
+ /**
+ * Changes the id of an add-on.
+ *
+ * This implements a core API of the store.
+ */
+ async changeItemID(oldID, newID) {
+ // We always update the GUID in the reconciler because it will be
+ // referenced later in the sync process.
+ let state = this.reconciler.getAddonStateFromSyncGUID(oldID);
+ if (state) {
+ state.guid = newID;
+ await this.reconciler.saveState();
+ }
+
+ let addon = await this.getAddonByGUID(oldID);
+ if (!addon) {
+ this._log.debug(
+ "Cannot change item ID (" +
+ oldID +
+ ") in Add-on " +
+ "Manager because old add-on not present: " +
+ oldID
+ );
+ return;
+ }
+
+ addon.syncGUID = newID;
+ },
+
+ /**
+ * Obtain the set of all syncable add-on Sync GUIDs.
+ *
+ * This implements a core Store API.
+ */
+ async getAllIDs() {
+ let ids = {};
+
+ let addons = this.reconciler.addons;
+ for (let id in addons) {
+ let addon = addons[id];
+ if (await this.isAddonSyncable(addon)) {
+ ids[addon.guid] = true;
+ }
+ }
+
+ return ids;
+ },
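+ // The returned object maps Sync GUIDs to true, e.g. (GUIDs are
+ // hypothetical): { "aBcDeF123456": true, "gHiJkL789012": true }.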
+
+ /**
+ * Wipe engine data.
+ *
+ * This uninstalls all syncable addons from the application. In case of
+ * error, it logs the error and keeps trying with other add-ons.
+ */
+ async wipe() {
+ this._log.info("Processing wipe.");
+
+ await this.engine._refreshReconcilerState();
+
+ // We only wipe syncable add-ons. Wipe is a Sync feature, not a security
+ // feature.
+ let ids = await this.getAllIDs();
+ for (let guid in ids) {
+ let addon = await this.getAddonByGUID(guid);
+ if (!addon) {
+ this._log.debug(
+ "Ignoring add-on because it couldn't be obtained: " + guid
+ );
+ continue;
+ }
+
+ this._log.info("Uninstalling add-on as part of wipe: " + addon.id);
+ await Utils.catch.call(this, () => addon.uninstall())();
+ }
+ },
+
+ /** *************************************************************************
+ * Functions below are unique to this store and not part of the Store API *
+ ***************************************************************************/
+
+ /**
+ * Obtain an add-on from its public ID.
+ *
+ * @param id
+ * Add-on ID
+ * @return Addon or undefined if not found
+ */
+ async getAddonByID(id) {
+ return lazy.AddonManager.getAddonByID(id);
+ },
+
+ /**
+ * Obtain an add-on from its Sync GUID.
+ *
+ * @param guid
+ * Add-on Sync GUID
+ * @return DBAddonInternal or null
+ */
+ async getAddonByGUID(guid) {
+ return lazy.AddonManager.getAddonBySyncGUID(guid);
+ },
+
+ /**
+ * Determines whether an add-on is suitable for Sync.
+ *
+ * @param addon
+ * Addon instance
+ * @param ignoreRepoCheck
+ * Should we skip checking the Addons repository (primarily useful
+ * for testing and validation).
+ * @return Boolean indicating whether it is appropriate for Sync
+ */
+ async isAddonSyncable(addon, ignoreRepoCheck = false) {
+ // Currently, we limit syncable add-ons to those that are:
+ // 1) In a well-defined set of types
+ // 2) Installed in the current profile
+ // 3) Not installed by a foreign entity (i.e. installed by the app),
+ // since such add-ons act like global extensions
+ // 4) Not a hotfix
+ // 5) Not vetoed by the add-ons XPIProvider (e.g. because the add-on isn't
+ // installed in the profile directory, or for any other reason it says
+ // the add-on can't be synced)
+ // 6) Installed from AMO
+
+ // We could represent the test as a complex boolean expression. We go the
+ // verbose route so the failure reason is logged.
+ if (!addon) {
+ this._log.debug("Null object passed to isAddonSyncable.");
+ return false;
+ }
+
+ if (!this._syncableTypes.includes(addon.type)) {
+ this._log.debug(
+ addon.id + " not syncable: type not in allowed list: " + addon.type
+ );
+ return false;
+ }
+
+ if (!(addon.scope & lazy.AddonManager.SCOPE_PROFILE)) {
+ this._log.debug(addon.id + " not syncable: not installed in profile.");
+ return false;
+ }
+
+ // If the addon manager says it's not syncable, we skip it.
+ if (!addon.isSyncable) {
+ this._log.debug(addon.id + " not syncable: vetoed by the addon manager.");
+ return false;
+ }
+
+ // This may be too aggressive. If an add-on is downloaded from AMO and
+ // manually placed in the profile directory, foreignInstall will be set.
+ // Arguably, that add-on should be syncable.
+ // TODO Address the edge case and come up with more robust heuristics.
+ if (addon.foreignInstall) {
+ this._log.debug(addon.id + " not syncable: is foreign install.");
+ return false;
+ }
+
+ // If the AddonRepository's cache isn't enabled (which it typically isn't
+ // in tests), getCachedAddonByID always returns null - so skip the check
+ // in that case. We also provide a way to specifically opt-out of the check
+ // even if the cache is enabled, which is used by the validators.
+ if (ignoreRepoCheck || !lazy.AddonRepository.cacheEnabled) {
+ return true;
+ }
+
+ let result = await new Promise(res => {
+ lazy.AddonRepository.getCachedAddonByID(addon.id, res);
+ });
+
+ if (!result) {
+ this._log.debug(
+ addon.id + " not syncable: add-on not found in add-on repository."
+ );
+ return false;
+ }
+
+ return this.isSourceURITrusted(result.sourceURI);
+ },
+
+ /**
+ * Determine whether an add-on's sourceURI field is trusted and the add-on
+ * can be installed.
+ *
+ * This function should only ever be called from isAddonSyncable(). It is
+ * exposed as a separate function to make testing easier.
+ *
+ * @param uri
+ * nsIURI instance to validate
+ * @return bool
+ */
+ isSourceURITrusted: function isSourceURITrusted(uri) {
+ // For security reasons, we currently limit synced add-ons to those
+ // installed from trusted hostname(s). We additionally require TLS with
+ // the add-ons site to help prevent forgeries.
+ let trustedHostnames = Svc.Prefs.get(
+ "addons.trustedSourceHostnames",
+ ""
+ ).split(",");
+
+ if (!uri) {
+ this._log.debug("Undefined argument to isSourceURITrusted().");
+ return false;
+ }
+
+ // Scheme is validated before the hostname because uri.host may not be
+ // populated for certain schemes. It appears to always be populated for
+ // https, so we avoid the potential NS_ERROR_FAILURE on field access.
+ if (uri.scheme != "https") {
+ this._log.debug("Source URI not HTTPS: " + uri.spec);
+ return false;
+ }
+
+ if (!trustedHostnames.includes(uri.host)) {
+ this._log.debug("Source hostname not trusted: " + uri.host);
+ return false;
+ }
+
+ return true;
+ },
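+ // A minimal sketch of the checks above, assuming the pref (which Svc.Prefs
+ // roots under services.sync.) lists "addons.mozilla.org"; that hostname is
+ // an assumption for illustration, not necessarily the shipped default:
+ //   isSourceURITrusted(Services.io.newURI("https://addons.mozilla.org/a.xpi")); // true
+ //   isSourceURITrusted(Services.io.newURI("http://addons.mozilla.org/a.xpi")); // false: not https
+ //   isSourceURITrusted(Services.io.newURI("https://example.com/a.xpi")); // false: untrusted host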
+
+ /**
+ * Update the userDisabled flag on an add-on.
+ *
+ * This will enable or disable an add-on. It has no return value and does
+ * not catch or handle exceptions thrown by the addon manager. If no action
+ * is needed it will return immediately.
+ *
+ * @param addon
+ * Addon instance to manipulate.
+ * @param value
+ * Boolean to which to set userDisabled on the passed Addon.
+ */
+ async updateUserDisabled(addon, value) {
+ if (addon.userDisabled == value) {
+ return;
+ }
+
+ // A pref allows changes to the enabled flag to be ignored.
+ if (Svc.Prefs.get("addons.ignoreUserEnabledChanges", false)) {
+ this._log.info(
+ "Ignoring enabled state change due to preference: " + addon.id
+ );
+ return;
+ }
+
+ AddonUtils.updateUserDisabled(addon, value);
+ // updating this flag doesn't send a notification for appDisabled addons,
+ // meaning the reconciler will not update its state and may resync the
+ // addon - so explicitly rectify the state (bug 1366994)
+ if (addon.appDisabled) {
+ await this.reconciler.rectifyStateFromAddon(addon);
+ }
+ },
+};
+
+Object.setPrototypeOf(AddonsStore.prototype, Store.prototype);
+
+/**
+ * The add-ons tracker keeps track of real-time changes to add-ons.
+ *
+ * It hooks up to the reconciler and receives notifications directly from it.
+ */
+function AddonsTracker(name, engine) {
+ LegacyTracker.call(this, name, engine);
+}
+AddonsTracker.prototype = {
+ get reconciler() {
+ return this.engine._reconciler;
+ },
+
+ get store() {
+ return this.engine._store;
+ },
+
+ /**
+ * This callback is executed whenever the AddonsReconciler sends out a change
+ * notification. See AddonsReconciler.addChangeListener().
+ */
+ async changeListener(date, change, addon) {
+ this._log.debug("changeListener invoked: " + change + " " + addon.id);
+ // Ignore changes that occur during sync.
+ if (this.ignoreAll) {
+ return;
+ }
+
+ if (!(await this.store.isAddonSyncable(addon))) {
+ this._log.debug(
+ "Ignoring change because add-on isn't syncable: " + addon.id
+ );
+ return;
+ }
+
+ const added = await this.addChangedID(addon.guid, date.getTime() / 1000);
+ if (added) {
+ this.score += SCORE_INCREMENT_XLARGE;
+ }
+ },
+
+ onStart() {
+ this.reconciler.startListening();
+ this.reconciler.addChangeListener(this);
+ },
+
+ onStop() {
+ this.reconciler.removeChangeListener(this);
+ this.reconciler.stopListening();
+ },
+};
+
+Object.setPrototypeOf(AddonsTracker.prototype, LegacyTracker.prototype);
+
+export class AddonValidator extends CollectionValidator {
+ constructor(engine = null) {
+ super("addons", "id", ["addonID", "enabled", "applicationID", "source"]);
+ this.engine = engine;
+ }
+
+ async getClientItems() {
+ return lazy.AddonManager.getAllAddons();
+ }
+
+ normalizeClientItem(item) {
+ let enabled = !item.userDisabled;
+ if (item.pendingOperations & lazy.AddonManager.PENDING_ENABLE) {
+ enabled = true;
+ } else if (item.pendingOperations & lazy.AddonManager.PENDING_DISABLE) {
+ enabled = false;
+ }
+ return {
+ enabled,
+ id: item.syncGUID,
+ addonID: item.id,
+ applicationID: Services.appinfo.ID,
+ source: "amo", // check item.foreignInstall?
+ original: item,
+ };
+ }
+
+ async normalizeServerItem(item) {
+ let guid = await this.engine._findDupe(item);
+ if (guid) {
+ item.id = guid;
+ }
+ return item;
+ }
+
+ clientUnderstands(item) {
+ return item.applicationID === Services.appinfo.ID;
+ }
+
+ async syncedByClient(item) {
+ return (
+ !item.original.hidden &&
+ !item.original.isSystem &&
+ !(
+ item.original.pendingOperations & lazy.AddonManager.PENDING_UNINSTALL
+ ) &&
+ // No need to await the returned promise explicitly:
+ // |expr1 && expr2| evaluates to expr2 if expr1 is true.
+ this.engine.isAddonSyncable(item.original, true)
+ );
+ }
+}
diff --git a/services/sync/modules/engines/bookmarks.sys.mjs b/services/sync/modules/engines/bookmarks.sys.mjs
new file mode 100644
index 0000000000..8724344084
--- /dev/null
+++ b/services/sync/modules/engines/bookmarks.sys.mjs
@@ -0,0 +1,953 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+
+import { SCORE_INCREMENT_XLARGE } from "resource://services-sync/constants.sys.mjs";
+import {
+ Changeset,
+ Store,
+ SyncEngine,
+ Tracker,
+} from "resource://services-sync/engines.sys.mjs";
+import { CryptoWrapper } from "resource://services-sync/record.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ Observers: "resource://services-common/observers.sys.mjs",
+ PlacesBackups: "resource://gre/modules/PlacesBackups.sys.mjs",
+ PlacesDBUtils: "resource://gre/modules/PlacesDBUtils.sys.mjs",
+ PlacesSyncUtils: "resource://gre/modules/PlacesSyncUtils.sys.mjs",
+ PlacesUtils: "resource://gre/modules/PlacesUtils.sys.mjs",
+ Resource: "resource://services-sync/resource.sys.mjs",
+ SyncedBookmarksMirror: "resource://gre/modules/SyncedBookmarksMirror.sys.mjs",
+});
+
+const PLACES_MAINTENANCE_INTERVAL_SECONDS = 4 * 60 * 60; // 4 hours.
+
+const FOLDER_SORTINDEX = 1000000;
+
+// Roots that should be deleted from the server, instead of applied locally.
+// This matches `AndroidBrowserBookmarksRepositorySession::forbiddenGUID`,
+// but allows tags because we don't want to reparent tag folders or tag items
+// to "unfiled".
+const FORBIDDEN_INCOMING_IDS = ["pinned", "places", "readinglist"];
+
+// Items with these parents should be deleted from the server. We allow
+// children of the Places root, to avoid orphaning left pane queries and other
+// descendants of custom roots.
+const FORBIDDEN_INCOMING_PARENT_IDS = ["pinned", "readinglist"];
+
+// The tracker ignores changes made by import and restore, to avoid bumping the
+// score and triggering syncs during the process, as well as changes made by
+// Sync.
+XPCOMUtils.defineLazyGetter(lazy, "IGNORED_SOURCES", () => [
+ lazy.PlacesUtils.bookmarks.SOURCES.SYNC,
+ lazy.PlacesUtils.bookmarks.SOURCES.IMPORT,
+ lazy.PlacesUtils.bookmarks.SOURCES.RESTORE,
+ lazy.PlacesUtils.bookmarks.SOURCES.RESTORE_ON_STARTUP,
+ lazy.PlacesUtils.bookmarks.SOURCES.SYNC_REPARENT_REMOVED_FOLDER_CHILDREN,
+]);
+
+// The validation telemetry version for the engine. Version 1 is collected
+// by `bookmark_validator.js`, and checks value as well as structure
+// differences. Version 2 is collected by the engine as part of building the
+// remote tree, and checks structure differences only.
+const BOOKMARK_VALIDATOR_VERSION = 2;
+
+// The maximum time that the engine should wait before aborting a bookmark
+// merge.
+const BOOKMARK_APPLY_TIMEOUT_MS = 5 * 60 * 60 * 1000; // 5 hours
+
+// The default frecency value to use when not known.
+const FRECENCY_UNKNOWN = -1;
+
+// Returns the constructor for a bookmark record type.
+function getTypeObject(type) {
+ switch (type) {
+ case "bookmark":
+ return Bookmark;
+ case "query":
+ return BookmarkQuery;
+ case "folder":
+ return BookmarkFolder;
+ case "livemark":
+ return Livemark;
+ case "separator":
+ return BookmarkSeparator;
+ case "item":
+ return PlacesItem;
+ }
+ return null;
+}
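+// For example, getTypeObject("folder") returns the BookmarkFolder constructor,
+// while an unrecognized type such as getTypeObject("widget") returns null;
+// callers below either fall back to PlacesItem or throw.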
+
+export function PlacesItem(collection, id, type) {
+ CryptoWrapper.call(this, collection, id);
+ this.type = type || "item";
+}
+
+PlacesItem.prototype = {
+ async decrypt(keyBundle) {
+ // Do the normal CryptoWrapper decrypt, but change types before returning
+ let clear = await CryptoWrapper.prototype.decrypt.call(this, keyBundle);
+
+ // Convert the abstract places item to the actual object type
+ if (!this.deleted) {
+ Object.setPrototypeOf(this, this.getTypeObject(this.type).prototype);
+ }
+
+ return clear;
+ },
+
+ getTypeObject: function PlacesItem_getTypeObject(type) {
+ let recordObj = getTypeObject(type);
+ if (!recordObj) {
+ throw new Error("Unknown places item object type: " + type);
+ }
+ return recordObj;
+ },
+
+ _logName: "Sync.Record.PlacesItem",
+
+ // Converts the record to a Sync bookmark object that can be passed to
+ // `PlacesSyncUtils.bookmarks.{insert, update}`.
+ toSyncBookmark() {
+ let result = {
+ kind: this.type,
+ recordId: this.id,
+ parentRecordId: this.parentid,
+ };
+ let dateAdded = lazy.PlacesSyncUtils.bookmarks.ratchetTimestampBackwards(
+ this.dateAdded,
+ +this.modified * 1000
+ );
+ if (dateAdded > 0) {
+ result.dateAdded = dateAdded;
+ }
+ return result;
+ },
+
+ // Populates the record from a Sync bookmark object returned from
+ // `PlacesSyncUtils.bookmarks.fetch`.
+ fromSyncBookmark(item) {
+ this.parentid = item.parentRecordId;
+ this.parentName = item.parentTitle;
+ if (item.dateAdded) {
+ this.dateAdded = item.dateAdded;
+ }
+ },
+};
+
+Object.setPrototypeOf(PlacesItem.prototype, CryptoWrapper.prototype);
+
+Utils.deferGetSet(PlacesItem, "cleartext", [
+ "hasDupe",
+ "parentid",
+ "parentName",
+ "type",
+ "dateAdded",
+]);
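+// Sketch of the mapping performed above (field values are hypothetical): a
+// decrypted record whose cleartext is { type: "separator", parentid: "toolbar" }
+// is re-prototyped to BookmarkSeparator by decrypt(), and its toSyncBookmark()
+// then yields { kind: "separator", recordId: <the record id>, parentRecordId:
+// "toolbar" }.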
+
+export function Bookmark(collection, id, type) {
+ PlacesItem.call(this, collection, id, type || "bookmark");
+}
+
+Bookmark.prototype = {
+ _logName: "Sync.Record.Bookmark",
+
+ toSyncBookmark() {
+ let info = PlacesItem.prototype.toSyncBookmark.call(this);
+ info.title = this.title;
+ info.url = this.bmkUri;
+ info.description = this.description;
+ info.tags = this.tags;
+ info.keyword = this.keyword;
+ return info;
+ },
+
+ fromSyncBookmark(item) {
+ PlacesItem.prototype.fromSyncBookmark.call(this, item);
+ this.title = item.title;
+ this.bmkUri = item.url.href;
+ this.description = item.description;
+ this.tags = item.tags;
+ this.keyword = item.keyword;
+ },
+};
+
+Object.setPrototypeOf(Bookmark.prototype, PlacesItem.prototype);
+
+Utils.deferGetSet(Bookmark, "cleartext", [
+ "title",
+ "bmkUri",
+ "description",
+ "tags",
+ "keyword",
+]);
+
+export function BookmarkQuery(collection, id) {
+ Bookmark.call(this, collection, id, "query");
+}
+
+BookmarkQuery.prototype = {
+ _logName: "Sync.Record.BookmarkQuery",
+
+ toSyncBookmark() {
+ let info = Bookmark.prototype.toSyncBookmark.call(this);
+ info.folder = this.folderName || undefined; // empty string -> undefined
+ info.query = this.queryId;
+ return info;
+ },
+
+ fromSyncBookmark(item) {
+ Bookmark.prototype.fromSyncBookmark.call(this, item);
+ this.folderName = item.folder || undefined; // empty string -> undefined
+ this.queryId = item.query;
+ },
+};
+
+Object.setPrototypeOf(BookmarkQuery.prototype, Bookmark.prototype);
+
+Utils.deferGetSet(BookmarkQuery, "cleartext", ["folderName", "queryId"]);
+
+export function BookmarkFolder(collection, id, type) {
+ PlacesItem.call(this, collection, id, type || "folder");
+}
+
+BookmarkFolder.prototype = {
+ _logName: "Sync.Record.Folder",
+
+ toSyncBookmark() {
+ let info = PlacesItem.prototype.toSyncBookmark.call(this);
+ info.description = this.description;
+ info.title = this.title;
+ return info;
+ },
+
+ fromSyncBookmark(item) {
+ PlacesItem.prototype.fromSyncBookmark.call(this, item);
+ this.title = item.title;
+ this.description = item.description;
+ this.children = item.childRecordIds;
+ },
+};
+
+Object.setPrototypeOf(BookmarkFolder.prototype, PlacesItem.prototype);
+
+Utils.deferGetSet(BookmarkFolder, "cleartext", [
+ "description",
+ "title",
+ "children",
+]);
+
+export function Livemark(collection, id) {
+ BookmarkFolder.call(this, collection, id, "livemark");
+}
+
+Livemark.prototype = {
+ _logName: "Sync.Record.Livemark",
+
+ toSyncBookmark() {
+ let info = BookmarkFolder.prototype.toSyncBookmark.call(this);
+ info.feed = this.feedUri;
+ info.site = this.siteUri;
+ return info;
+ },
+
+ fromSyncBookmark(item) {
+ BookmarkFolder.prototype.fromSyncBookmark.call(this, item);
+ this.feedUri = item.feed.href;
+ if (item.site) {
+ this.siteUri = item.site.href;
+ }
+ },
+};
+
+Object.setPrototypeOf(Livemark.prototype, BookmarkFolder.prototype);
+
+Utils.deferGetSet(Livemark, "cleartext", ["siteUri", "feedUri"]);
+
+export function BookmarkSeparator(collection, id) {
+ PlacesItem.call(this, collection, id, "separator");
+}
+
+BookmarkSeparator.prototype = {
+ _logName: "Sync.Record.Separator",
+
+ fromSyncBookmark(item) {
+ PlacesItem.prototype.fromSyncBookmark.call(this, item);
+ this.pos = item.index;
+ },
+};
+
+Object.setPrototypeOf(BookmarkSeparator.prototype, PlacesItem.prototype);
+
+Utils.deferGetSet(BookmarkSeparator, "cleartext", "pos");
+
+/**
+ * The bookmarks engine uses a different store that stages downloaded bookmarks
+ * in a separate database, instead of writing directly to Places. The buffer
+ * handles reconciliation, so we stub out `_reconcile`, and wait to pull changes
+ * until we're ready to upload.
+ */
+export function BookmarksEngine(service) {
+ SyncEngine.call(this, "Bookmarks", service);
+}
+
+BookmarksEngine.prototype = {
+ _recordObj: PlacesItem,
+ _trackerObj: BookmarksTracker,
+ _storeObj: BookmarksStore,
+ version: 2,
+ // Used to override the engine name in telemetry, so that we can distinguish
+ // this engine from the old, now removed non-buffered engine.
+ overrideTelemetryName: "bookmarks-buffered",
+
+ // Needed to ensure we don't miss items when resuming a sync that failed or
+ // aborted early.
+ _defaultSort: "oldest",
+
+ syncPriority: 4,
+ allowSkippedRecord: false,
+
+ async _ensureCurrentSyncID(newSyncID) {
+ await lazy.PlacesSyncUtils.bookmarks.ensureCurrentSyncId(newSyncID);
+ let buf = await this._store.ensureOpenMirror();
+ await buf.ensureCurrentSyncId(newSyncID);
+ },
+
+ async ensureCurrentSyncID(newSyncID) {
+ let shouldWipeRemote =
+ await lazy.PlacesSyncUtils.bookmarks.shouldWipeRemote();
+ if (!shouldWipeRemote) {
+ this._log.debug(
+ "Checking if server sync ID ${newSyncID} matches existing",
+ { newSyncID }
+ );
+ await this._ensureCurrentSyncID(newSyncID);
+ return newSyncID;
+ }
+ // We didn't take the new sync ID because we need to wipe the server
+ // and other clients after a restore. Send the command, wipe the
+ // server, and reset our sync ID to reupload everything.
+ this._log.debug(
+ "Ignoring server sync ID ${newSyncID} after restore; " +
+ "wiping server and resetting sync ID",
+ { newSyncID }
+ );
+ await this.service.clientsEngine.sendCommand(
+ "wipeEngine",
+ [this.name],
+ null,
+ { reason: "bookmark-restore" }
+ );
+ let assignedSyncID = await this.resetSyncID();
+ return assignedSyncID;
+ },
+
+ async getSyncID() {
+ return lazy.PlacesSyncUtils.bookmarks.getSyncId();
+ },
+
+ async resetSyncID() {
+ await this._deleteServerCollection();
+ return this.resetLocalSyncID();
+ },
+
+ async resetLocalSyncID() {
+ let newSyncID = await lazy.PlacesSyncUtils.bookmarks.resetSyncId();
+ this._log.debug("Assigned new sync ID ${newSyncID}", { newSyncID });
+ let buf = await this._store.ensureOpenMirror();
+ await buf.ensureCurrentSyncId(newSyncID);
+ return newSyncID;
+ },
+
+ async getLastSync() {
+ let mirror = await this._store.ensureOpenMirror();
+ return mirror.getCollectionHighWaterMark();
+ },
+
+ async setLastSync(lastSync) {
+ let mirror = await this._store.ensureOpenMirror();
+ await mirror.setCollectionLastModified(lastSync);
+ // Update the last sync time in Places so that reverting to the original
+ // bookmarks engine doesn't download records we've already applied.
+ await lazy.PlacesSyncUtils.bookmarks.setLastSync(lastSync);
+ },
+
+ async _syncStartup() {
+ await super._syncStartup();
+
+ try {
+ // For first syncs, back up the user's bookmarks.
+ let lastSync = await this.getLastSync();
+ if (!lastSync) {
+ this._log.debug("Bookmarks backup starting");
+ await lazy.PlacesBackups.create(null, true);
+ this._log.debug("Bookmarks backup done");
+ }
+ } catch (ex) {
+ // Failure to create a backup is somewhat bad, but probably not bad
+ // enough to prevent syncing of bookmarks - so just log the error and
+ // continue.
+ this._log.warn(
+ "Error while backing up bookmarks, but continuing with sync",
+ ex
+ );
+ }
+ },
+
+ async _sync() {
+ try {
+ await super._sync();
+ if (this._ranMaintenanceOnLastSync) {
+ // If the last sync failed, we ran maintenance, and this sync succeeded,
+ // maintenance likely fixed the issue.
+ this._ranMaintenanceOnLastSync = false;
+ this.service.recordTelemetryEvent("maintenance", "fix", "bookmarks");
+ }
+ } catch (ex) {
+ if (
+ Async.isShutdownException(ex) ||
+ ex.status > 0 ||
+ ex.name == "InterruptedError"
+ ) {
+ // Don't run maintenance on shutdown or HTTP errors, or if we aborted
+ // the sync because the user changed their bookmarks during merging.
+ throw ex;
+ }
+ if (ex.name == "MergeConflictError") {
+ this._log.warn(
+ "Bookmark syncing ran into a merge conflict error...will retry later"
+ );
+ return;
+ }
+ // Run Places maintenance periodically to try to recover from corruption
+ // that might have caused the sync to fail. We cap the interval because
+ // persistent failures likely indicate a problem that won't be fixed by
+ // running maintenance after every failed sync.
+ let elapsedSinceMaintenance =
+ Date.now() / 1000 -
+ Services.prefs.getIntPref("places.database.lastMaintenance", 0);
+ if (elapsedSinceMaintenance >= PLACES_MAINTENANCE_INTERVAL_SECONDS) {
+ this._log.error(
+ "Bookmark sync failed, ${elapsedSinceMaintenance}s " +
+ "elapsed since last run; running Places maintenance",
+ { elapsedSinceMaintenance }
+ );
+ await lazy.PlacesDBUtils.maintenanceOnIdle();
+ this._ranMaintenanceOnLastSync = true;
+ this.service.recordTelemetryEvent("maintenance", "run", "bookmarks");
+ } else {
+ this._ranMaintenanceOnLastSync = false;
+ }
+ throw ex;
+ }
+ },
+
+ async _syncFinish() {
+ await SyncEngine.prototype._syncFinish.call(this);
+ await lazy.PlacesSyncUtils.bookmarks.ensureMobileQuery();
+ },
+
+ async pullAllChanges() {
+ return this.pullNewChanges();
+ },
+
+ async trackRemainingChanges() {
+ let changes = this._modified.changes;
+ await lazy.PlacesSyncUtils.bookmarks.pushChanges(changes);
+ },
+
+ _deleteId(id) {
+ this._noteDeletedId(id);
+ },
+
+ // The bookmarks engine rarely calls this method directly, except in tests or
+ // when handling a `reset{All, Engine}` command from another client. We
+// usually reset local Sync metadata on a sync ID mismatch, which the engine
+// overrides with logic that lives in Places and the mirror.
+ async _resetClient() {
+ await super._resetClient();
+ await lazy.PlacesSyncUtils.bookmarks.reset();
+ let buf = await this._store.ensureOpenMirror();
+ await buf.reset();
+ },
+
+ // Cleans up the Places root, reading list items (ignored in bug 762118,
+ // removed in bug 1155684), and pinned sites.
+ _shouldDeleteRemotely(incomingItem) {
+ return (
+ FORBIDDEN_INCOMING_IDS.includes(incomingItem.id) ||
+ FORBIDDEN_INCOMING_PARENT_IDS.includes(incomingItem.parentid)
+ );
+ },
+
+ emptyChangeset() {
+ return new BookmarksChangeset();
+ },
+
+ async _apply() {
+ let buf = await this._store.ensureOpenMirror();
+ let watchdog = this._newWatchdog();
+ watchdog.start(BOOKMARK_APPLY_TIMEOUT_MS);
+
+ try {
+ let recordsToUpload = await buf.apply({
+ remoteTimeSeconds: lazy.Resource.serverTime,
+ signal: watchdog.signal,
+ });
+ this._modified.replace(recordsToUpload);
+ } finally {
+ watchdog.stop();
+ if (watchdog.abortReason) {
+ this._log.warn(`Aborting bookmark merge: ${watchdog.abortReason}`);
+ }
+ }
+ },
+
+ async _processIncoming(newitems) {
+ await super._processIncoming(newitems);
+ await this._apply();
+ },
+
+ async _reconcile(item) {
+ return true;
+ },
+
+ async _createRecord(id) {
+ let record = await this._doCreateRecord(id);
+ if (!record.deleted) {
+ // Set hasDupe on all (non-deleted) records since we don't use it and we
+ // want to minimize the risk of older clients corrupting records. Note
+ // that the SyncedBookmarksMirror sets it for all records that it created,
+ // but we would like to ensure that weakly uploaded records are marked as
+ // hasDupe as well.
+ record.hasDupe = true;
+ }
+ return record;
+ },
+
+ async _doCreateRecord(id) {
+ let change = this._modified.changes[id];
+ if (!change) {
+ this._log.error(
+ "Creating record for item ${id} not in strong changeset",
+ { id }
+ );
+ throw new TypeError("Can't create record for unchanged item");
+ }
+ let record = this._recordFromCleartext(id, change.cleartext);
+ record.sortindex = await this._store._calculateIndex(record);
+ return record;
+ },
+
+ _recordFromCleartext(id, cleartext) {
+ let recordObj = getTypeObject(cleartext.type);
+ if (!recordObj) {
+ this._log.warn(
+ "Creating record for item ${id} with unknown type ${type}",
+ { id, type: cleartext.type }
+ );
+ recordObj = PlacesItem;
+ }
+ let record = new recordObj(this.name, id);
+ record.cleartext = cleartext;
+ return record;
+ },
+
+ async pullChanges() {
+ return {};
+ },
+
+ /**
+ * Writes successfully uploaded records back to the mirror, so that the
+ * mirror matches the server. We update the mirror before updating Places,
+ * which has implications for interrupted syncs.
+ *
+ * 1. Sync interrupted during upload; server doesn't support atomic uploads.
+ * We'll download and reapply everything that we uploaded before the
+ * interruption. All locally changed items retain their change counters.
+ * 2. Sync interrupted during upload; atomic uploads enabled. The server
+ * discards the batch. All changed local items retain their change
+ * counters, so the next sync resumes cleanly.
+ * 3. Sync interrupted during upload; outgoing records can't fit in a single
+ * batch. We'll download and reapply all records through the most recent
+ * committed batch. This is a variation of (1).
+ * 4. Sync interrupted after we update the mirror, but before cleanup. The
+ * mirror matches the server, but locally changed items retain their change
+ * counters. Reuploading them on the next sync should be idempotent, though
+ * unnecessary. If another client makes a conflicting remote change before
+ * we sync again, we may incorrectly prefer the local state.
+ * 5. Sync completes successfully. We'll update the mirror, and reset the
+ * change counters for all items.
+ */
+ async _onRecordsWritten(succeeded, failed, serverModifiedTime) {
+ let records = [];
+ for (let id of succeeded) {
+ let change = this._modified.changes[id];
+ if (!change) {
+ // TODO (Bug 1433178): Write weakly uploaded records back to the mirror.
+ this._log.info("Uploaded record not in strong changeset", id);
+ continue;
+ }
+ if (!change.synced) {
+ this._log.info("Record in strong changeset not uploaded", id);
+ continue;
+ }
+ let cleartext = change.cleartext;
+ if (!cleartext) {
+ this._log.error(
+ "Missing Sync record cleartext for ${id} in ${change}",
+ { id, change }
+ );
+ throw new TypeError("Missing cleartext for uploaded Sync record");
+ }
+ let record = this._recordFromCleartext(id, cleartext);
+ record.modified = serverModifiedTime;
+ records.push(record);
+ }
+ let buf = await this._store.ensureOpenMirror();
+ await buf.store(records, { needsMerge: false });
+ },
+
+ async finalize() {
+ await super.finalize();
+ await this._store.finalize();
+ },
+};
+
+Object.setPrototypeOf(BookmarksEngine.prototype, SyncEngine.prototype);
+
+/**
+ * The bookmarks store delegates to the mirror for staging and applying
+ * records. Most `Store` methods intentionally remain abstract, so you can't use
+ * this store to create or update bookmarks in Places. All changes must go
+ * through the mirror, which takes care of merging and producing a valid tree.
+ */
+function BookmarksStore(name, engine) {
+ Store.call(this, name, engine);
+}
+
+BookmarksStore.prototype = {
+ _openMirrorPromise: null,
+
+ // For tests.
+ _batchChunkSize: 500,
+
+ // Create a record starting from the weave id (places guid)
+ async createRecord(id, collection) {
+ let item = await lazy.PlacesSyncUtils.bookmarks.fetch(id);
+ if (!item) {
+ // deleted item
+ let record = new PlacesItem(collection, id);
+ record.deleted = true;
+ return record;
+ }
+
+ let recordObj = getTypeObject(item.kind);
+ if (!recordObj) {
+ this._log.warn("Unknown item type, cannot serialize: " + item.kind);
+ recordObj = PlacesItem;
+ }
+ let record = new recordObj(collection, id);
+ record.fromSyncBookmark(item);
+
+ record.sortindex = await this._calculateIndex(record);
+
+ return record;
+ },
+
+ async _calculateIndex(record) {
+ // Ensure folders have a very high sort index so they're not synced last.
+ if (record.type == "folder") {
+ return FOLDER_SORTINDEX;
+ }
+
+ // For anything directly under the toolbar, give it a boost larger than an
+ // unvisited bookmark's frecency
+ let index = 0;
+ if (record.parentid == "toolbar") {
+ index += 150;
+ }
+
+ // Add in the bookmark's frecency if we have something.
+ if (record.bmkUri != null) {
+ let frecency = FRECENCY_UNKNOWN;
+ try {
+ frecency = await lazy.PlacesSyncUtils.history.fetchURLFrecency(
+ record.bmkUri
+ );
+ } catch (ex) {
+ this._log.warn(
+ `Failed to fetch frecency for ${record.id}; assuming default`,
+ ex
+ );
+ this._log.trace("Record {id} has invalid URL ${bmkUri}", record);
+ }
+ if (frecency != FRECENCY_UNKNOWN) {
+ index += frecency;
+ }
+ }
+
+ return index;
+ },
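+ // Worked example (the frecency value is hypothetical): a bookmark directly
+ // under the toolbar whose URL has frecency 2000 gets sortindex
+ // 150 + 2000 = 2150; a folder always gets FOLDER_SORTINDEX (1000000), and a
+ // bookmark elsewhere with unknown frecency keeps the base index of 0.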
+
+ async wipe() {
+ // Save a backup before clearing out all bookmarks.
+ await lazy.PlacesBackups.create(null, true);
+ await lazy.PlacesSyncUtils.bookmarks.wipe();
+ },
+
+ ensureOpenMirror() {
+ if (!this._openMirrorPromise) {
+ this._openMirrorPromise = this._openMirror().catch(err => {
+ // We may have failed to open the mirror temporarily; for example, if
+ // the database is locked. Clear the promise so that subsequent
+ // `ensureOpenMirror` calls can try to open the mirror again.
+ this._openMirrorPromise = null;
+ throw err;
+ });
+ }
+ return this._openMirrorPromise;
+ },
+
+ async _openMirror() {
+ let mirrorPath = PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ "bookmarks.sqlite"
+ );
+ await IOUtils.makeDirectory(PathUtils.parent(mirrorPath), {
+ createAncestors: true,
+ });
+
+ return lazy.SyncedBookmarksMirror.open({
+ path: mirrorPath,
+ recordStepTelemetry: (name, took, counts) => {
+ lazy.Observers.notify(
+ "weave:engine:sync:step",
+ {
+ name,
+ took,
+ counts,
+ },
+ this.name
+ );
+ },
+ recordValidationTelemetry: (took, checked, problems) => {
+ lazy.Observers.notify(
+ "weave:engine:validate:finish",
+ {
+ version: BOOKMARK_VALIDATOR_VERSION,
+ took,
+ checked,
+ problems,
+ },
+ this.name
+ );
+ },
+ });
+ },
+
+ async applyIncomingBatch(records, countTelemetry) {
+ let buf = await this.ensureOpenMirror();
+ for (let chunk of lazy.PlacesUtils.chunkArray(
+ records,
+ this._batchChunkSize
+ )) {
+ await buf.store(chunk);
+ }
+ // Array of failed records.
+ return [];
+ },
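+ // For example, with the default _batchChunkSize of 500, a download of 1200
+ // records is staged in the mirror as three chunks of 500, 500, and 200.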
+
+ async applyIncoming(record) {
+ let buf = await this.ensureOpenMirror();
+ await buf.store([record]);
+ },
+
+ async finalize() {
+ if (!this._openMirrorPromise) {
+ return;
+ }
+ let buf = await this._openMirrorPromise;
+ await buf.finalize();
+ },
+};
+
+Object.setPrototypeOf(BookmarksStore.prototype, Store.prototype);
+
+// The bookmarks tracker is a special flower. Instead of listening for changes
+// via observer notifications, it queries Places for the set of items that have
+// changed since the last sync. Because it's a "pull-based" tracker, it ignores
+// all concepts of "add a changed ID." However, it still registers an observer
+// to bump the score, so that changed bookmarks are synced immediately.
+function BookmarksTracker(name, engine) {
+ Tracker.call(this, name, engine);
+}
+BookmarksTracker.prototype = {
+ onStart() {
+ this._placesListener = new PlacesWeakCallbackWrapper(
+ this.handlePlacesEvents.bind(this)
+ );
+ lazy.PlacesUtils.observers.addListener(
+ [
+ "bookmark-added",
+ "bookmark-removed",
+ "bookmark-moved",
+ "bookmark-guid-changed",
+ "bookmark-keyword-changed",
+ "bookmark-tags-changed",
+ "bookmark-time-changed",
+ "bookmark-title-changed",
+ "bookmark-url-changed",
+ ],
+ this._placesListener
+ );
+ Svc.Obs.add("bookmarks-restore-begin", this);
+ Svc.Obs.add("bookmarks-restore-success", this);
+ Svc.Obs.add("bookmarks-restore-failed", this);
+ },
+
+ onStop() {
+ lazy.PlacesUtils.observers.removeListener(
+ [
+ "bookmark-added",
+ "bookmark-removed",
+ "bookmark-moved",
+ "bookmark-guid-changed",
+ "bookmark-keyword-changed",
+ "bookmark-tags-changed",
+ "bookmark-time-changed",
+ "bookmark-title-changed",
+ "bookmark-url-changed",
+ ],
+ this._placesListener
+ );
+ Svc.Obs.remove("bookmarks-restore-begin", this);
+ Svc.Obs.remove("bookmarks-restore-success", this);
+ Svc.Obs.remove("bookmarks-restore-failed", this);
+ },
+
+ async getChangedIDs() {
+ return lazy.PlacesSyncUtils.bookmarks.pullChanges();
+ },
+
+ observe(subject, topic, data) {
+ switch (topic) {
+ case "bookmarks-restore-begin":
+ this._log.debug("Ignoring changes from importing bookmarks.");
+ break;
+ case "bookmarks-restore-success":
+ this._log.debug("Tracking all items on successful import.");
+
+ if (data == "json") {
+ this._log.debug(
+ "Restore succeeded: wiping server and other clients."
+ );
+ // Trigger an immediate sync. `ensureCurrentSyncID` will notice we
+ // restored, wipe the server and other clients, reset the sync ID, and
+ // upload the restored tree.
+ this.score += SCORE_INCREMENT_XLARGE;
+ } else {
+ // "html", "html-initial", or "json-append"
+ this._log.debug("Import succeeded.");
+ }
+ break;
+ case "bookmarks-restore-failed":
+ this._log.debug("Tracking all items on failed import.");
+ break;
+ }
+ },
+
+ QueryInterface: ChromeUtils.generateQI(["nsISupportsWeakReference"]),
+
+ /* Every add/remove/change will trigger a sync for MULTI_DEVICE */
+ _upScore: function BMT__upScore() {
+ this.score += SCORE_INCREMENT_XLARGE;
+ },
+
+ handlePlacesEvents(events) {
+ for (let event of events) {
+ switch (event.type) {
+ case "bookmark-added":
+ case "bookmark-removed":
+ case "bookmark-moved":
+ case "bookmark-keyword-changed":
+ case "bookmark-tags-changed":
+ case "bookmark-time-changed":
+ case "bookmark-title-changed":
+ case "bookmark-url-changed":
+ if (lazy.IGNORED_SOURCES.includes(event.source)) {
+ continue;
+ }
+
+ this._log.trace(`'${event.type}': ${event.id}`);
+ this._upScore();
+ break;
+ case "bookmark-guid-changed":
+ if (event.source !== lazy.PlacesUtils.bookmarks.SOURCES.SYNC) {
+ this._log.warn(
+ "The source of bookmark-guid-changed event shoud be sync."
+ );
+ continue;
+ }
+
+ this._log.trace(`'${event.type}': ${event.id}`);
+ this._upScore();
+ break;
+ case "purge-caches":
+ this._log.trace("purge-caches");
+ this._upScore();
+ break;
+ }
+ }
+ },
+};
+
+Object.setPrototypeOf(BookmarksTracker.prototype, Tracker.prototype);
+
+/**
+ * A changeset that stores extra metadata in a change record for each ID. The
+ * engine updates this metadata when uploading Sync records, and writes it back
+ * to Places in `BookmarksEngine#trackRemainingChanges`.
+ *
+ * The `synced` property on a change record means its corresponding item has
+ * been uploaded, and we should pretend it doesn't exist in the changeset.
+ */
+class BookmarksChangeset extends Changeset {
+ // Only `_reconcile` calls `getModifiedTimestamp` and `has`, and the engine
+ // does its own reconciliation.
+ getModifiedTimestamp(id) {
+ throw new Error("Don't use timestamps to resolve bookmark conflicts");
+ }
+
+ has(id) {
+ throw new Error("Don't use the changeset to resolve bookmark conflicts");
+ }
+
+ delete(id) {
+ let change = this.changes[id];
+ if (change) {
+ // Mark the change as synced without removing it from the set. We do this
+ // so that we can update Places in `trackRemainingChanges`.
+ change.synced = true;
+ }
+ }
+
+ ids() {
+ let results = new Set();
+ for (let id in this.changes) {
+ if (!this.changes[id].synced) {
+ results.add(id);
+ }
+ }
+ return [...results];
+ }
+}
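+// Usage sketch: after `changeset.delete("someGuid")`, the entry remains in
+// `changes` with synced: true, so ids() no longer reports it, but
+// `BookmarksEngine#trackRemainingChanges` can still write its final state back
+// to Places.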
diff --git a/services/sync/modules/engines/clients.sys.mjs b/services/sync/modules/engines/clients.sys.mjs
new file mode 100644
index 0000000000..f3cebc8a54
--- /dev/null
+++ b/services/sync/modules/engines/clients.sys.mjs
@@ -0,0 +1,1123 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * How does the clients engine work?
+ *
+ * - We use 2 files - commands.json and commands-syncing.json.
+ *
+ * - At sync upload time, we attempt a rename of commands.json to
+ *   commands-syncing.json, and ignore errors (this helps us recover if we
+ *   crash during a sync!).
+ * - We load commands-syncing.json and stash the contents in
+ *   _currentlySyncingCommands, which lives for the duration of the upload
+ *   process.
+ * - We use _currentlySyncingCommands to build the outgoing records.
+ * - Immediately after a successful upload, we delete commands-syncing.json
+ *   from disk (and clear _currentlySyncingCommands). We reconcile our local
+ *   records with what we just wrote to the server, and add commands for
+ *   failed IDs back into commands.json.
+ * - Any time we need to "save" a command for future syncs, we load
+ * commands.json, update it, and write it back out.
+ */
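+// A rough sketch of the commands.json shape implied by the description above
+// (the client ID and args are illustrative assumptions, not real values):
+//
+//   {
+//     "client-guid-1": [
+//       { "command": "displayURI",
+//         "args": ["https://example.com/page", "sender-guid", "Page title"] }
+//     ]
+//   }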
+
+import { Async } from "resource://services-common/async.sys.mjs";
+
+import {
+ DEVICE_TYPE_DESKTOP,
+ DEVICE_TYPE_MOBILE,
+ SINGLE_USER_THRESHOLD,
+ SYNC_API_VERSION,
+} from "resource://services-sync/constants.sys.mjs";
+
+import {
+ Store,
+ SyncEngine,
+ LegacyTracker,
+} from "resource://services-sync/engines.sys.mjs";
+import { CryptoWrapper } from "resource://services-sync/record.sys.mjs";
+import { Resource } from "resource://services-sync/resource.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+const lazy = {};
+
+XPCOMUtils.defineLazyGetter(lazy, "fxAccounts", () => {
+ return ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+ ).getFxAccountsSingleton();
+});
+
+const { PREF_ACCOUNT_ROOT } = ChromeUtils.import(
+ "resource://gre/modules/FxAccountsCommon.js"
+);
+
+const CLIENTS_TTL = 15552000; // 180 days
+const CLIENTS_TTL_REFRESH = 604800; // 7 days
+const STALE_CLIENT_REMOTE_AGE = 604800; // 7 days
+
+// TTL of the message sent to another device when sending a tab
+const NOTIFY_TAB_SENT_TTL_SECS = 1 * 3600; // 1 hour
+
+// How often we force a refresh of the FxA device list.
+const REFRESH_FXA_DEVICE_INTERVAL_MS = 2 * 60 * 60 * 1000; // 2 hours
+
+// Reasons behind sending collection_changed push notifications.
+const COLLECTION_MODIFIED_REASON_SENDTAB = "sendtab";
+const COLLECTION_MODIFIED_REASON_FIRSTSYNC = "firstsync";
+
+const SUPPORTED_PROTOCOL_VERSIONS = [SYNC_API_VERSION];
+const LAST_MODIFIED_ON_PROCESS_COMMAND_PREF =
+ "services.sync.clients.lastModifiedOnProcessCommands";
+
+function hasDupeCommand(commands, action) {
+ if (!commands) {
+ return false;
+ }
+ return commands.some(
+ other =>
+ other.command == action.command &&
+ Utils.deepEquals(other.args, action.args)
+ );
+}
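+// For example (args borrowed from the wipeEngine command used elsewhere in
+// this engine):
+//   hasDupeCommand(
+//     [{ command: "wipeEngine", args: ["bookmarks"] }],
+//     { command: "wipeEngine", args: ["bookmarks"] }
+//   ); // => true, since both the command and the deeply-equal args match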
+
+export function ClientsRec(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+
+ClientsRec.prototype = {
+ _logName: "Sync.Record.Clients",
+ ttl: CLIENTS_TTL,
+};
+Object.setPrototypeOf(ClientsRec.prototype, CryptoWrapper.prototype);
+
+Utils.deferGetSet(ClientsRec, "cleartext", [
+ "name",
+ "type",
+ "commands",
+ "version",
+ "protocols",
+ "formfactor",
+ "os",
+ "appPackage",
+ "application",
+ "device",
+ "fxaDeviceId",
+]);
+
+export function ClientEngine(service) {
+ SyncEngine.call(this, "Clients", service);
+
+ this.fxAccounts = lazy.fxAccounts;
+ this.addClientCommandQueue = Async.asyncQueueCaller(this._log);
+ Utils.defineLazyIDProperty(this, "localID", "services.sync.client.GUID");
+}
+
+ClientEngine.prototype = {
+ _storeObj: ClientStore,
+ _recordObj: ClientsRec,
+ _trackerObj: ClientsTracker,
+ allowSkippedRecord: false,
+ _knownStaleFxADeviceIds: null,
+ _lastDeviceCounts: null,
+ _lastFxaDeviceRefresh: 0,
+
+ async initialize() {
+ // Reset the last sync timestamp on every startup so that we fetch all clients
+ await this.resetLastSync();
+ },
+
+ // These two properties allow us to avoid replaying the same commands
+ // continuously if we cannot manage to upload our own record.
+ _localClientLastModified: 0,
+ get _lastModifiedOnProcessCommands() {
+ return Services.prefs.getIntPref(LAST_MODIFIED_ON_PROCESS_COMMAND_PREF, -1);
+ },
+
+ set _lastModifiedOnProcessCommands(value) {
+ Services.prefs.setIntPref(LAST_MODIFIED_ON_PROCESS_COMMAND_PREF, value);
+ },
+
+ get isFirstSync() {
+ return !this.lastRecordUpload;
+ },
+
+ // Always sync client data as it controls other sync behavior
+ get enabled() {
+ return true;
+ },
+
+ get lastRecordUpload() {
+ return Svc.Prefs.get(this.name + ".lastRecordUpload", 0);
+ },
+ set lastRecordUpload(value) {
+ Svc.Prefs.set(this.name + ".lastRecordUpload", Math.floor(value));
+ },
+
+ get remoteClients() {
+ // return all non-stale clients for external consumption.
+ return Object.values(this._store._remoteClients).filter(v => !v.stale);
+ },
+
+ remoteClient(id) {
+ let client = this._store._remoteClients[id];
+ return client && !client.stale ? client : null;
+ },
+
+ remoteClientExists(id) {
+ return !!this.remoteClient(id);
+ },
+
+ // Aggregate some stats on the composition of clients on this account
+ get stats() {
+ let stats = {
+ hasMobile: this.localType == DEVICE_TYPE_MOBILE,
+ names: [this.localName],
+ numClients: 1,
+ };
+
+ for (let id in this._store._remoteClients) {
+ let { name, type, stale } = this._store._remoteClients[id];
+ if (!stale) {
+ stats.hasMobile = stats.hasMobile || type == DEVICE_TYPE_MOBILE;
+ stats.names.push(name);
+ stats.numClients++;
+ }
+ }
+
+ return stats;
+ },
+
+ /**
+ * Obtain information about device types.
+ *
+ * Returns a Map of device types to integer counts. Guaranteed to include
+ * "desktop" (which will have at least 1 - this device) and "mobile" (which
+ * may have zero) counts. It almost certainly will include only these 2.
+ */
+ get deviceTypes() {
+ let counts = new Map();
+
+ counts.set(this.localType, 1); // currently this must be DEVICE_TYPE_DESKTOP
+ counts.set(DEVICE_TYPE_MOBILE, 0);
+
+ for (let id in this._store._remoteClients) {
+ let record = this._store._remoteClients[id];
+ if (record.stale) {
+ continue; // pretend "stale" records don't exist.
+ }
+ let type = record.type;
+ if (!counts.has(type)) {
+ counts.set(type, 0);
+ }
+
+ counts.set(type, counts.get(type) + 1);
+ }
+
+ return counts;
+ },
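+ // For example (assuming the usual "desktop"/"mobile" values for the device
+ // type constants): this desktop device plus two non-stale mobile clients
+ // yields Map { "desktop" => 1, "mobile" => 2 }.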
+
+ get brandName() {
+ let brand = Services.strings.createBundle(
+ "chrome://branding/locale/brand.properties"
+ );
+ return brand.GetStringFromName("brandShortName");
+ },
+
+ get localName() {
+ return this.fxAccounts.device.getLocalName();
+ },
+ set localName(value) {
+ this.fxAccounts.device.setLocalName(value);
+ },
+
+ get localType() {
+ return this.fxAccounts.device.getLocalType();
+ },
+
+ getClientName(id) {
+ if (id == this.localID) {
+ return this.localName;
+ }
+ let client = this._store._remoteClients[id];
+ if (!client) {
+ return "";
+ }
+ // Sync clients don't always correctly update the device name. However,
+ // FxA always does, so try to pull the name from there first.
+ let fxaDevice = this.fxAccounts.device.recentDeviceList?.find(
+ device => device.id === client.fxaDeviceId
+ );
+
+ // should be very rare, but could happen if we have yet to fetch devices,
+ // or the client recently disconnected
+ if (!fxaDevice) {
+ this._log.warn(
+ "Couldn't find associated FxA device, falling back to client name"
+ );
+ return client.name;
+ }
+ return fxaDevice.name;
+ },
+
+ getClientFxaDeviceId(id) {
+ if (this._store._remoteClients[id]) {
+ return this._store._remoteClients[id].fxaDeviceId;
+ }
+ return null;
+ },
+
+ getClientByFxaDeviceId(fxaDeviceId) {
+ for (let id in this._store._remoteClients) {
+ let client = this._store._remoteClients[id];
+ if (client.stale) {
+ continue;
+ }
+ if (client.fxaDeviceId == fxaDeviceId) {
+ return client;
+ }
+ }
+ return null;
+ },
+
+ getClientType(id) {
+ const client = this._store._remoteClients[id];
+ if (client.type == DEVICE_TYPE_DESKTOP) {
+ return "desktop";
+ }
+ if (client.formfactor && client.formfactor.includes("tablet")) {
+ return "tablet";
+ }
+ return "phone";
+ },
+
+ async _readCommands() {
+ let commands = await Utils.jsonLoad("commands", this);
+ return commands || {};
+ },
+
+ /**
+ * Low level function, do not use directly (use _addClientCommand instead).
+ */
+ async _saveCommands(commands) {
+ try {
+ await Utils.jsonSave("commands", this, commands);
+ } catch (error) {
+ this._log.error("Failed to save JSON outgoing commands", error);
+ }
+ },
+
+ async _prepareCommandsForUpload() {
+ try {
+ await Utils.jsonMove("commands", "commands-syncing", this);
+ } catch (e) {
+ // Ignore errors
+ }
+ let commands = await Utils.jsonLoad("commands-syncing", this);
+ return commands || {};
+ },
+
+ async _deleteUploadedCommands() {
+ delete this._currentlySyncingCommands;
+ try {
+ await Utils.jsonRemove("commands-syncing", this);
+ } catch (err) {
+ this._log.error("Failed to delete syncing-commands file", err);
+ }
+ },
+
+ // Gets commands for a client we are yet to write to the server. Doesn't
+ // include commands for that client which are already on the server.
+ // We should rename this!
+ async getClientCommands(clientId) {
+ const allCommands = await this._readCommands();
+ return allCommands[clientId] || [];
+ },
+
+ async removeLocalCommand(command) {
+ // the implementation of this engine is such that adding a command to
+ // the local client is how commands are deleted! ¯\_(ツ)_/¯
+ await this._addClientCommand(this.localID, command);
+ },
+
+ async _addClientCommand(clientId, command) {
+ this.addClientCommandQueue.enqueueCall(async () => {
+ try {
+ const localCommands = await this._readCommands();
+ const localClientCommands = localCommands[clientId] || [];
+ const remoteClient = this._store._remoteClients[clientId];
+ let remoteClientCommands = [];
+ if (remoteClient && remoteClient.commands) {
+ remoteClientCommands = remoteClient.commands;
+ }
+ const clientCommands = localClientCommands.concat(remoteClientCommands);
+ if (hasDupeCommand(clientCommands, command)) {
+ return false;
+ }
+ localCommands[clientId] = localClientCommands.concat(command);
+ await this._saveCommands(localCommands);
+ return true;
+ } catch (e) {
+ // Failing to save a command should not "break the queue" of pending operations.
+ this._log.error(e);
+ return false;
+ }
+ });
+
+ return this.addClientCommandQueue.promiseCallsComplete();
+ },
+
+ async _removeClientCommands(clientId) {
+ const allCommands = await this._readCommands();
+ delete allCommands[clientId];
+ await this._saveCommands(allCommands);
+ },
+
+ async updateKnownStaleClients() {
+ this._log.debug("Updating the known stale clients");
+ // _fetchFxADevices side effect updates this._knownStaleFxADeviceIds.
+ await this._fetchFxADevices();
+ let localFxADeviceId = await lazy.fxAccounts.device.getLocalId();
+ // Process newer records first, so that if we hit a record with a device ID
+ // we've seen before, we can mark it stale immediately.
+ let clientList = Object.values(this._store._remoteClients).sort(
+ (a, b) => b.serverLastModified - a.serverLastModified
+ );
+ let seenDeviceIds = new Set([localFxADeviceId]);
+ for (let client of clientList) {
+ // Clients might not have an `fxaDeviceId` if they fail the FxA
+ // registration process.
+ if (!client.fxaDeviceId) {
+ continue;
+ }
+ if (this._knownStaleFxADeviceIds.includes(client.fxaDeviceId)) {
+ this._log.info(
+ `Hiding stale client ${client.id} - in known stale clients list`
+ );
+ client.stale = true;
+ } else if (seenDeviceIds.has(client.fxaDeviceId)) {
+ this._log.info(
+ `Hiding stale client ${client.id}` +
+ ` - duplicate device id ${client.fxaDeviceId}`
+ );
+ client.stale = true;
+ } else {
+ seenDeviceIds.add(client.fxaDeviceId);
+ }
+ }
+ },
+
+ async _fetchFxADevices() {
+ // We only force a refresh periodically to keep the load on the servers
+ // down, and because we expect FxA to have received a push message in
+ // most cases when the FxA device list would have changed. For this reason
+ // we still go ahead and check the stale list even if we didn't force a
+ // refresh.
+ let now = this.fxAccounts._internal.now(); // tests mock this .now() impl.
+ if (now - REFRESH_FXA_DEVICE_INTERVAL_MS > this._lastFxaDeviceRefresh) {
+ this._lastFxaDeviceRefresh = now;
+ try {
+ await this.fxAccounts.device.refreshDeviceList();
+ } catch (e) {
+ this._log.error("Could not refresh the FxA device list", e);
+ }
+ }
+
+ // We assume that clients not present in the FxA Device Manager list have been
+ // disconnected and so are stale
+ this._log.debug("Refreshing the known stale clients list");
+ let localClients = Object.values(this._store._remoteClients)
+ .filter(client => client.fxaDeviceId) // iOS client records don't have fxaDeviceId
+ .map(client => client.fxaDeviceId);
+ const fxaClients = this.fxAccounts.device.recentDeviceList
+ ? this.fxAccounts.device.recentDeviceList.map(device => device.id)
+ : [];
+ this._knownStaleFxADeviceIds = Utils.arraySub(localClients, fxaClients);
+ },
+
+ async _syncStartup() {
+ // Reupload new client record periodically.
+ if (Date.now() / 1000 - this.lastRecordUpload > CLIENTS_TTL_REFRESH) {
+ await this._tracker.addChangedID(this.localID);
+ }
+ return SyncEngine.prototype._syncStartup.call(this);
+ },
+
+ async _processIncoming() {
+ // Fetch all records from the server.
+ await this.resetLastSync();
+ this._incomingClients = {};
+ try {
+ await SyncEngine.prototype._processIncoming.call(this);
+ // Update FxA Device list.
+ await this._fetchFxADevices();
+ // Since clients are synced unconditionally, any records in the local store
+ // that don't exist on the server must be for disconnected clients. Remove
+ // them, so that we don't upload records with commands for clients that will
+ // never see them. We also do this to filter out stale clients from the
+ // tabs collection, since showing their list of tabs is confusing.
+ for (let id in this._store._remoteClients) {
+ if (!this._incomingClients[id]) {
+ this._log.info(`Removing local state for deleted client ${id}`);
+ await this._removeRemoteClient(id);
+ }
+ }
+ let localFxADeviceId = await lazy.fxAccounts.device.getLocalId();
+ // Bug 1264498: Mobile clients don't remove themselves from the clients
+ // collection when the user disconnects Sync, so we mark as stale clients
+ // with the same name that haven't synced in over a week.
+ // (Note we can't simply delete them, or we re-apply them next sync - see
+ // bug 1287687)
+ this._localClientLastModified = Math.round(
+ this._incomingClients[this.localID]
+ );
+ delete this._incomingClients[this.localID];
+ let names = new Set([this.localName]);
+ let seenDeviceIds = new Set([localFxADeviceId]);
+ let idToLastModifiedList = Object.entries(this._incomingClients).sort(
+ (a, b) => b[1] - a[1]
+ );
+ for (let [id, serverLastModified] of idToLastModifiedList) {
+ let record = this._store._remoteClients[id];
+ // stash the server last-modified time on the record.
+ record.serverLastModified = serverLastModified;
+ if (
+ record.fxaDeviceId &&
+ this._knownStaleFxADeviceIds.includes(record.fxaDeviceId)
+ ) {
+ this._log.info(
+ `Hiding stale client ${id} - in known stale clients list`
+ );
+ record.stale = true;
+ }
+ if (!names.has(record.name)) {
+ if (record.fxaDeviceId) {
+ seenDeviceIds.add(record.fxaDeviceId);
+ }
+ names.add(record.name);
+ continue;
+ }
+ let remoteAge = Resource.serverTime - this._incomingClients[id];
+ if (remoteAge > STALE_CLIENT_REMOTE_AGE) {
+ this._log.info(`Hiding stale client ${id} with age ${remoteAge}`);
+ record.stale = true;
+ continue;
+ }
+ if (record.fxaDeviceId && seenDeviceIds.has(record.fxaDeviceId)) {
+ this._log.info(
+ `Hiding stale client ${record.id}` +
+ ` - duplicate device id ${record.fxaDeviceId}`
+ );
+ record.stale = true;
+ } else if (record.fxaDeviceId) {
+ seenDeviceIds.add(record.fxaDeviceId);
+ }
+ }
+ } finally {
+ this._incomingClients = null;
+ }
+ },
+
+ async _uploadOutgoing() {
+ this._currentlySyncingCommands = await this._prepareCommandsForUpload();
+    const clientsWithPendingCommands = Object.keys(
+      this._currentlySyncingCommands
+    );
+    for (let clientId of clientsWithPendingCommands) {
+ if (this._store._remoteClients[clientId] || this.localID == clientId) {
+ this._modified.set(clientId, 0);
+ }
+ }
+ let updatedIDs = this._modified.ids();
+ await SyncEngine.prototype._uploadOutgoing.call(this);
+ // Record the response time as the server time for each item we uploaded.
+ let lastSync = await this.getLastSync();
+ for (let id of updatedIDs) {
+ if (id == this.localID) {
+ this.lastRecordUpload = lastSync;
+ } else {
+ this._store._remoteClients[id].serverLastModified = lastSync;
+ }
+ }
+ },
+
+ async _onRecordsWritten(succeeded, failed) {
+ // Reconcile the status of the local records with what we just wrote on the
+ // server
+ for (let id of succeeded) {
+ const commandChanges = this._currentlySyncingCommands[id];
+ if (id == this.localID) {
+ if (this.isFirstSync) {
+ this._log.info(
+ "Uploaded our client record for the first time, notifying other clients."
+ );
+ this._notifyClientRecordUploaded();
+ }
+ if (this.localCommands) {
+ this.localCommands = this.localCommands.filter(
+ command => !hasDupeCommand(commandChanges, command)
+ );
+ }
+ } else {
+ const clientRecord = this._store._remoteClients[id];
+ if (!commandChanges || !clientRecord) {
+          // Should be impossible; otherwise we wouldn't have been writing it.
+ this._log.warn(
+ "No command/No record changes for a client we uploaded"
+ );
+ continue;
+ }
+ // fixup the client record, so our copy of _remoteClients matches what we uploaded.
+ this._store._remoteClients[id] = await this._store.createRecord(id);
+ // we could do better and pass the reference to the record we just uploaded,
+ // but this will do for now
+ }
+ }
+
+ // Re-add failed commands
+ for (let id of failed) {
+ const commandChanges = this._currentlySyncingCommands[id];
+ if (!commandChanges) {
+ continue;
+ }
+ await this._addClientCommand(id, commandChanges);
+ }
+
+ await this._deleteUploadedCommands();
+
+ // Notify other devices that their own client collection changed
+ const idsToNotify = succeeded.reduce((acc, id) => {
+ if (id == this.localID) {
+ return acc;
+ }
+ const fxaDeviceId = this.getClientFxaDeviceId(id);
+ return fxaDeviceId ? acc.concat(fxaDeviceId) : acc;
+ }, []);
+ if (idsToNotify.length) {
+ this._notifyOtherClientsModified(idsToNotify);
+ }
+ },
+
+ _notifyOtherClientsModified(ids) {
+ // We are not waiting on this promise on purpose.
+ this._notifyCollectionChanged(
+ ids,
+ NOTIFY_TAB_SENT_TTL_SECS,
+ COLLECTION_MODIFIED_REASON_SENDTAB
+ );
+ },
+
+ _notifyClientRecordUploaded() {
+ // We are not waiting on this promise on purpose.
+ this._notifyCollectionChanged(
+ null,
+ 0,
+ COLLECTION_MODIFIED_REASON_FIRSTSYNC
+ );
+ },
+
+ /**
+ * @param {?string[]} ids FxA Client IDs to notify. null means everyone else.
+ * @param {number} ttl TTL of the push notification.
+ * @param {string} reason Reason for sending this push notification.
+ */
+ async _notifyCollectionChanged(ids, ttl, reason) {
+ const message = {
+ version: 1,
+ command: "sync:collection_changed",
+ data: {
+ collections: ["clients"],
+ reason,
+ },
+ };
+ let excludedIds = null;
+ if (!ids) {
+ const localFxADeviceId = await lazy.fxAccounts.device.getLocalId();
+ excludedIds = [localFxADeviceId];
+ }
+ try {
+ await this.fxAccounts.notifyDevices(ids, excludedIds, message, ttl);
+ } catch (e) {
+ this._log.error("Could not notify of changes in the collection", e);
+ }
+ },
+
+ async _syncFinish() {
+    // Record histograms for our device types, and also write them to a pref
+    // so non-histogram telemetry (e.g., UITelemetry) and the sync scheduler
+    // have easy access to them, and so they are accurate even before we've
+    // successfully synced the first time after startup.
+ let deviceTypeCounts = this.deviceTypes;
+ for (let [deviceType, count] of deviceTypeCounts) {
+ let hid;
+ let prefName = this.name + ".devices.";
+ switch (deviceType) {
+ case DEVICE_TYPE_DESKTOP:
+ hid = "WEAVE_DEVICE_COUNT_DESKTOP";
+ prefName += "desktop";
+ break;
+ case DEVICE_TYPE_MOBILE:
+ hid = "WEAVE_DEVICE_COUNT_MOBILE";
+ prefName += "mobile";
+ break;
+ default:
+ this._log.warn(
+ `Unexpected deviceType "${deviceType}" recording device telemetry.`
+ );
+ continue;
+ }
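+      // (Illustrative) a count of 2 desktop clients is added to the
+      // WEAVE_DEVICE_COUNT_DESKTOP histogram and mirrored to a pref like
+      // "clients.devices.desktop" via Svc.Prefs (whose branch root we assume
+      // is services.sync).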
+ Services.telemetry.getHistogramById(hid).add(count);
+ // Optimization: only write the pref if it changed since our last sync.
+ if (
+ this._lastDeviceCounts == null ||
+ this._lastDeviceCounts.get(prefName) != count
+ ) {
+ Svc.Prefs.set(prefName, count);
+ }
+ }
+ this._lastDeviceCounts = deviceTypeCounts;
+ return SyncEngine.prototype._syncFinish.call(this);
+ },
+
+ async _reconcile(item) {
+ // Every incoming record is reconciled, so we use this to track the
+ // contents of the collection on the server.
+ this._incomingClients[item.id] = item.modified;
+
+ if (!(await this._store.itemExists(item.id))) {
+ return true;
+ }
+ // Clients are synced unconditionally, so we'll always have new records.
+ // Unfortunately, this will cause the scheduler to use the immediate sync
+ // interval for the multi-device case, instead of the active interval. We
+ // work around this by updating the record during reconciliation, and
+ // returning false to indicate that the record doesn't need to be applied
+ // later.
+ await this._store.update(item);
+ return false;
+ },
+
+ // Treat reset the same as wiping for locally cached clients
+ async _resetClient() {
+ await this._wipeClient();
+ },
+
+ async _wipeClient() {
+ await SyncEngine.prototype._resetClient.call(this);
+ this._knownStaleFxADeviceIds = null;
+ delete this.localCommands;
+ await this._store.wipe();
+ try {
+ await Utils.jsonRemove("commands", this);
+ } catch (err) {
+ this._log.warn("Could not delete commands.json", err);
+ }
+ try {
+ await Utils.jsonRemove("commands-syncing", this);
+ } catch (err) {
+ this._log.warn("Could not delete commands-syncing.json", err);
+ }
+ },
+
+ async removeClientData() {
+ let res = this.service.resource(this.engineURL + "/" + this.localID);
+ await res.delete();
+ },
+
+ // Override the default behavior to delete bad records from the server.
+ async handleHMACMismatch(item, mayRetry) {
+ this._log.debug("Handling HMAC mismatch for " + item.id);
+
+ let base = await SyncEngine.prototype.handleHMACMismatch.call(
+ this,
+ item,
+ mayRetry
+ );
+ if (base != SyncEngine.kRecoveryStrategy.error) {
+ return base;
+ }
+
+ // It's a bad client record. Save it to be deleted at the end of the sync.
+ this._log.debug("Bad client record detected. Scheduling for deletion.");
+ await this._deleteId(item.id);
+
+ // Neither try again nor error; we're going to delete it.
+ return SyncEngine.kRecoveryStrategy.ignore;
+ },
+
+ /**
+ * A hash of valid commands that the client knows about. The key is a command
+ * and the value is a hash containing information about the command such as
+ * number of arguments, description, and importance (lower importance numbers
+ * indicate higher importance).
+ */
+ _commands: {
+ resetAll: {
+ args: 0,
+ importance: 0,
+ desc: "Clear temporary local data for all engines",
+ },
+ resetEngine: {
+ args: 1,
+ importance: 0,
+ desc: "Clear temporary local data for engine",
+ },
+ wipeEngine: {
+ args: 1,
+ importance: 0,
+ desc: "Delete all client data for engine",
+ },
+ logout: { args: 0, importance: 0, desc: "Log out client" },
+ },
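+  // As an illustrative sketch of the shape above: wipeEngine declares one
+  // argument, so a caller would invoke something like
+  //   await engine.sendCommand("wipeEngine", ["bookmarks"], clientId);
+  // where "bookmarks" and clientId are placeholders, not values from this
+  // file.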
+
+ /**
+ * Sends a command+args pair to a specific client.
+ *
+ * @param command Command string
+ * @param args Array of arguments/data for command
+ * @param clientId Client to send command to
+ */
+ async _sendCommandToClient(command, args, clientId, telemetryExtra) {
+ this._log.trace("Sending " + command + " to " + clientId);
+
+ let client = this._store._remoteClients[clientId];
+ if (!client) {
+ throw new Error("Unknown remote client ID: '" + clientId + "'.");
+ }
+ if (client.stale) {
+ throw new Error("Stale remote client ID: '" + clientId + "'.");
+ }
+
+ let action = {
+ command,
+ args,
+ // We send the flowID to the other client so *it* can report it in its
+ // telemetry - we record it in ours below.
+ flowID: telemetryExtra.flowID,
+ };
+
+ if (await this._addClientCommand(clientId, action)) {
+ this._log.trace(`Client ${clientId} got a new action`, [command, args]);
+ await this._tracker.addChangedID(clientId);
+ try {
+ telemetryExtra.deviceID =
+ this.service.identity.hashedDeviceID(clientId);
+ } catch (_) {}
+
+ this.service.recordTelemetryEvent(
+ "sendcommand",
+ command,
+ undefined,
+ telemetryExtra
+ );
+ } else {
+ this._log.trace(`Client ${clientId} got a duplicate action`, [
+ command,
+ args,
+ ]);
+ }
+ },
+
+ /**
+ * Check if the local client has any remote commands and perform them.
+ *
+ * @return false to abort sync
+ */
+ async processIncomingCommands() {
+ return this._notify("clients:process-commands", "", async function () {
+ if (
+ !this.localCommands ||
+ (this._lastModifiedOnProcessCommands == this._localClientLastModified &&
+ !this.ignoreLastModifiedOnProcessCommands)
+ ) {
+ return true;
+ }
+ this._lastModifiedOnProcessCommands = this._localClientLastModified;
+
+      const clearedCommands = (await this._readCommands())[this.localID];
+ const commands = this.localCommands.filter(
+ command => !hasDupeCommand(clearedCommands, command)
+ );
+ let didRemoveCommand = false;
+ // Process each command in order.
+ for (let rawCommand of commands) {
+ let shouldRemoveCommand = true; // most commands are auto-removed.
+ let { command, args, flowID } = rawCommand;
+ this._log.debug("Processing command " + command, args);
+
+ this.service.recordTelemetryEvent(
+ "processcommand",
+ command,
+ undefined,
+ { flowID }
+ );
+
+ let engines = [args[0]];
+ switch (command) {
+ case "resetAll":
+ engines = null;
+ // Fallthrough
+ case "resetEngine":
+ await this.service.resetClient(engines);
+ break;
+ case "wipeEngine":
+ await this.service.wipeClient(engines);
+ break;
+ case "logout":
+ this.service.logout();
+ return false;
+ default:
+ this._log.warn("Received an unknown command: " + command);
+ break;
+ }
+ // Add the command to the "cleared" commands list
+ if (shouldRemoveCommand) {
+ await this.removeLocalCommand(rawCommand);
+ didRemoveCommand = true;
+ }
+ }
+ if (didRemoveCommand) {
+ await this._tracker.addChangedID(this.localID);
+ }
+
+ return true;
+ })();
+ },
+
+ /**
+ * Validates and sends a command to a client or all clients.
+ *
+ * Calling this does not actually sync the command data to the server. If the
+ * client already has the command/args pair, it won't receive a duplicate
+ * command.
+ * This method is async since it writes the command to a file.
+ *
+ * @param command
+ * Command to invoke on remote clients
+ * @param args
+ * Array of arguments to give to the command
+ * @param clientId
+ * Client ID to send command to. If undefined, send to all remote
+ * clients.
+ * @param flowID
+ * A unique identifier used to track success for this operation across
+ * devices.
+ */
+ async sendCommand(command, args, clientId = null, telemetryExtra = {}) {
+ let commandData = this._commands[command];
+ // Don't send commands that we don't know about.
+ if (!commandData) {
+ this._log.error("Unknown command to send: " + command);
+ return;
+ } else if (!args || args.length != commandData.args) {
+ // Don't send a command with the wrong number of arguments.
+ this._log.error(
+ "Expected " +
+ commandData.args +
+ " args for '" +
+ command +
+ "', but got " +
+ args
+ );
+ return;
+ }
+
+ // We allocate a "flowID" here, so it is used for each client.
+ telemetryExtra = Object.assign({}, telemetryExtra); // don't clobber the caller's object
+ if (!telemetryExtra.flowID) {
+ telemetryExtra.flowID = Utils.makeGUID();
+ }
+
+ if (clientId) {
+ await this._sendCommandToClient(command, args, clientId, telemetryExtra);
+ } else {
+ for (let [id, record] of Object.entries(this._store._remoteClients)) {
+ if (!record.stale) {
+ await this._sendCommandToClient(command, args, id, telemetryExtra);
+ }
+ }
+ }
+ },
+
+ async _removeRemoteClient(id) {
+ delete this._store._remoteClients[id];
+ await this._tracker.removeChangedID(id);
+ await this._removeClientCommands(id);
+ this._modified.delete(id);
+ },
+};
+Object.setPrototypeOf(ClientEngine.prototype, SyncEngine.prototype);
+
+function ClientStore(name, engine) {
+ Store.call(this, name, engine);
+}
+ClientStore.prototype = {
+ _remoteClients: {},
+
+ async create(record) {
+ await this.update(record);
+ },
+
+ async update(record) {
+ if (record.id == this.engine.localID) {
+ // Only grab commands from the server; local name/type always wins
+ this.engine.localCommands = record.commands;
+ } else {
+ this._remoteClients[record.id] = record.cleartext;
+ }
+ },
+
+ async createRecord(id, collection) {
+ let record = new ClientsRec(collection, id);
+
+ const commandsChanges = this.engine._currentlySyncingCommands
+ ? this.engine._currentlySyncingCommands[id]
+ : [];
+
+ // Package the individual components into a record for the local client
+ if (id == this.engine.localID) {
+ try {
+ record.fxaDeviceId = await this.engine.fxAccounts.device.getLocalId();
+ } catch (error) {
+ this._log.warn("failed to get fxa device id", error);
+ }
+ record.name = this.engine.localName;
+ record.type = this.engine.localType;
+ record.version = Services.appinfo.version;
+ record.protocols = SUPPORTED_PROTOCOL_VERSIONS;
+
+      // Subtract the commands we recorded that we've already executed.
+ if (
+ commandsChanges &&
+ commandsChanges.length &&
+ this.engine.localCommands &&
+ this.engine.localCommands.length
+ ) {
+ record.commands = this.engine.localCommands.filter(
+ command => !hasDupeCommand(commandsChanges, command)
+ );
+ }
+
+ // Optional fields.
+ record.os = Services.appinfo.OS; // "Darwin"
+ record.appPackage = Services.appinfo.ID;
+ record.application = this.engine.brandName; // "Nightly"
+
+ // We can't compute these yet.
+ // record.device = ""; // Bug 1100723
+ // record.formfactor = ""; // Bug 1100722
+ } else {
+ record.cleartext = Object.assign({}, this._remoteClients[id]);
+ delete record.cleartext.serverLastModified; // serverLastModified is a local only attribute.
+
+ // Add the commands we have to send
+ if (commandsChanges && commandsChanges.length) {
+ const recordCommands = record.cleartext.commands || [];
+ const newCommands = commandsChanges.filter(
+ command => !hasDupeCommand(recordCommands, command)
+ );
+ record.cleartext.commands = recordCommands.concat(newCommands);
+ }
+
+ if (record.cleartext.stale) {
+ // It's almost certainly a logic error for us to upload a record we
+ // consider stale, so make log noise, but still remove the flag.
+ this._log.error(
+ `Preparing to upload record ${id} that we consider stale`
+ );
+ delete record.cleartext.stale;
+ }
+ }
+ if (record.commands) {
+ const maxPayloadSize =
+ this.engine.service.getMemcacheMaxRecordPayloadSize();
+ let origOrder = new Map(record.commands.map((c, i) => [c, i]));
+ // we sort first by priority, and second by age (indicated by order in the
+ // original list)
+ let commands = record.commands.slice().sort((a, b) => {
+ let infoA = this.engine._commands[a.command];
+ let infoB = this.engine._commands[b.command];
+        // Treat unknown command types as highest priority, to allow us to add
+        // high-priority commands in the future without worrying about clients
+        // removing them from each other unnecessarily.
+        let importA = infoA ? infoA.importance : 0;
+        let importB = infoB ? infoB.importance : 0;
+        // Higher importance numbers indicate that we care less, so they
+ // go to the end of the list where they'll be popped off.
+ let importDelta = importA - importB;
+ if (importDelta != 0) {
+ return importDelta;
+ }
+ let origIdxA = origOrder.get(a);
+ let origIdxB = origOrder.get(b);
+ // Within equivalent priorities, we put older entries near the end
+ // of the list, so that they are removed first.
+ return origIdxB - origIdxA;
+ });
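+      // Illustrative ordering: if commands A (oldest) and B (newer) both have
+      // importance 0 and C has importance 1, the sort yields [B, A, C], so C
+      // and then A are the first to be dropped when truncating below.
+      // (Hypothetical commands, shown only to pin down the comparator.)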
+ let truncatedCommands = Utils.tryFitItems(commands, maxPayloadSize);
+ if (truncatedCommands.length != record.commands.length) {
+ this._log.warn(
+ `Removing commands from client ${id} (from ${record.commands.length} to ${truncatedCommands.length})`
+ );
+ // Restore original order.
+ record.commands = truncatedCommands.sort(
+ (a, b) => origOrder.get(a) - origOrder.get(b)
+ );
+ }
+ }
+ return record;
+ },
+
+ async itemExists(id) {
+ return id in (await this.getAllIDs());
+ },
+
+ async getAllIDs() {
+ let ids = {};
+ ids[this.engine.localID] = true;
+ for (let id in this._remoteClients) {
+ ids[id] = true;
+ }
+ return ids;
+ },
+
+ async wipe() {
+ this._remoteClients = {};
+ },
+};
+Object.setPrototypeOf(ClientStore.prototype, Store.prototype);
+
+function ClientsTracker(name, engine) {
+ LegacyTracker.call(this, name, engine);
+}
+ClientsTracker.prototype = {
+ _enabled: false,
+
+ onStart() {
+ Svc.Obs.add("fxaccounts:new_device_id", this.asyncObserver);
+ Services.prefs.addObserver(
+ PREF_ACCOUNT_ROOT + "device.name",
+ this.asyncObserver
+ );
+ },
+ onStop() {
+ Services.prefs.removeObserver(
+ PREF_ACCOUNT_ROOT + "device.name",
+ this.asyncObserver
+ );
+ Svc.Obs.remove("fxaccounts:new_device_id", this.asyncObserver);
+ },
+
+ async observe(subject, topic, data) {
+ switch (topic) {
+ case "nsPref:changed":
+ this._log.debug("client.name preference changed");
+ // Fallthrough intended.
+ case "fxaccounts:new_device_id":
+ await this.addChangedID(this.engine.localID);
+ this.score += SINGLE_USER_THRESHOLD + 1; // ALWAYS SYNC NOW.
+ break;
+ }
+ },
+};
+Object.setPrototypeOf(ClientsTracker.prototype, LegacyTracker.prototype);
diff --git a/services/sync/modules/engines/extension-storage.sys.mjs b/services/sync/modules/engines/extension-storage.sys.mjs
new file mode 100644
index 0000000000..f0ec21811a
--- /dev/null
+++ b/services/sync/modules/engines/extension-storage.sys.mjs
@@ -0,0 +1,303 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+import {
+ BridgedEngine,
+ BridgeWrapperXPCOM,
+ LogAdapter,
+} from "resource://services-sync/bridged_engine.sys.mjs";
+import { SyncEngine, Tracker } from "resource://services-sync/engines.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ MULTI_DEVICE_THRESHOLD: "resource://services-sync/constants.sys.mjs",
+ Observers: "resource://services-common/observers.sys.mjs",
+ SCORE_INCREMENT_MEDIUM: "resource://services-sync/constants.sys.mjs",
+ Svc: "resource://services-sync/util.sys.mjs",
+ extensionStorageSync: "resource://gre/modules/ExtensionStorageSync.sys.mjs",
+
+ extensionStorageSyncKinto:
+ "resource://gre/modules/ExtensionStorageSyncKinto.sys.mjs",
+});
+
+XPCOMUtils.defineLazyServiceGetter(
+ lazy,
+ "StorageSyncService",
+ "@mozilla.org/extensions/storage/sync;1",
+ "nsIInterfaceRequestor"
+);
+
+const PREF_FORCE_ENABLE = "engine.extension-storage.force";
+
+// A helper to indicate whether extension-storage is enabled - it's based on
+// the "addons" pref. The same logic is shared between both engine impls.
+function getEngineEnabled() {
+ // By default, we sync extension storage if we sync addons. This
+ // lets us simplify the UX since users probably don't consider
+ // "extension preferences" a separate category of syncing.
+ // However, we also respect engine.extension-storage.force, which
+ // can be set to true or false, if a power user wants to customize
+ // the behavior despite the lack of UI.
+ const forced = lazy.Svc.Prefs.get(PREF_FORCE_ENABLE, undefined);
+ if (forced !== undefined) {
+ return forced;
+ }
+ return lazy.Svc.Prefs.get("engine.addons", false);
+}
+
+function setEngineEnabled(enabled) {
+ // This will be called by the engine manager when declined on another device.
+ // Things will go a bit pear-shaped if the engine manager tries to end up
+ // with 'addons' and 'extension-storage' in different states - however, this
+ // *can* happen given we support the `engine.extension-storage.force`
+ // preference. So if that pref exists, we set it to this value. If that pref
+ // doesn't exist, we just ignore it and hope that the 'addons' engine is also
+ // going to be set to the same state.
+ if (lazy.Svc.Prefs.has(PREF_FORCE_ENABLE)) {
+ lazy.Svc.Prefs.set(PREF_FORCE_ENABLE, enabled);
+ }
+}
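+// Taken together (an illustrative summary, not new behavior): with no force
+// pref set, extension-storage simply mirrors "engine.addons"; a user-set
+// "engine.extension-storage.force" of false keeps this engine off even while
+// add-ons continue to sync.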
+
+// A "bridged engine" to our webext-storage component.
+export function ExtensionStorageEngineBridge(service) {
+ this.component = lazy.StorageSyncService.getInterface(
+ Ci.mozIBridgedSyncEngine
+ );
+ BridgedEngine.call(this, "Extension-Storage", service);
+ this._bridge = new BridgeWrapperXPCOM(this.component);
+
+ let app_services_logger = Cc["@mozilla.org/appservices/logger;1"].getService(
+ Ci.mozIAppServicesLogger
+ );
+ let logger_target = "app-services:webext_storage:sync";
+ app_services_logger.register(logger_target, new LogAdapter(this._log));
+}
+
+ExtensionStorageEngineBridge.prototype = {
+ syncPriority: 10,
+
+  // Used to override the engine name in telemetry, so that we can distinguish
+  // this Rust-backed implementation from the deprecated Kinto one.
+ overrideTelemetryName: "rust-webext-storage",
+
+ _notifyPendingChanges() {
+ return new Promise(resolve => {
+ this.component
+ .QueryInterface(Ci.mozISyncedExtensionStorageArea)
+ .fetchPendingSyncChanges({
+ QueryInterface: ChromeUtils.generateQI([
+ "mozIExtensionStorageListener",
+ "mozIExtensionStorageCallback",
+ ]),
+ onChanged: (extId, json) => {
+ try {
+ lazy.extensionStorageSync.notifyListeners(
+ extId,
+ JSON.parse(json)
+ );
+ } catch (ex) {
+ this._log.warn(
+ `Error notifying change listeners for ${extId}`,
+ ex
+ );
+ }
+ },
+ handleSuccess: resolve,
+ handleError: (code, message) => {
+ this._log.warn(
+ "Error fetching pending synced changes",
+ message,
+ code
+ );
+ resolve();
+ },
+ });
+ });
+ },
+
+ _takeMigrationInfo() {
+ return new Promise((resolve, reject) => {
+ this.component
+ .QueryInterface(Ci.mozIExtensionStorageArea)
+ .takeMigrationInfo({
+ QueryInterface: ChromeUtils.generateQI([
+ "mozIExtensionStorageCallback",
+ ]),
+ handleSuccess: result => {
+ resolve(result ? JSON.parse(result) : null);
+ },
+ handleError: (code, message) => {
+ this._log.warn("Error fetching migration info", message, code);
+ // `takeMigrationInfo` doesn't actually perform the migration,
+ // just reads (and clears) any data stored in the DB from the
+ // previous migration.
+ //
+ // Any errors here are very likely occurring a good while
+ // after the migration ran, so we just warn and pretend
+ // nothing was there.
+ resolve(null);
+ },
+ });
+ });
+ },
+
+ async _syncStartup() {
+ let result = await super._syncStartup();
+ let info = await this._takeMigrationInfo();
+ if (info) {
+ lazy.Observers.notify(
+ "weave:telemetry:migration",
+ info,
+ "webext-storage"
+ );
+ }
+ return result;
+ },
+
+ async _processIncoming() {
+ await super._processIncoming();
+ try {
+ await this._notifyPendingChanges();
+ } catch (ex) {
+ // Failing to notify `storage.onChanged` observers is bad, but shouldn't
+ // interrupt syncing.
+ this._log.warn("Error notifying about synced changes", ex);
+ }
+ },
+
+ get enabled() {
+ return getEngineEnabled();
+ },
+ set enabled(enabled) {
+ setEngineEnabled(enabled);
+ },
+};
+Object.setPrototypeOf(
+ ExtensionStorageEngineBridge.prototype,
+ BridgedEngine.prototype
+);
+
+/**
+ *****************************************************************************
+ *
+ * Deprecated support for Kinto
+ *
+ *****************************************************************************
+ */
+
+/**
+ * The Engine that manages syncing for the web extension "storage"
+ * API, and in particular ext.storage.sync.
+ *
+ * ext.storage.sync is implemented using Kinto, so it has mechanisms
+ * for syncing that we do not need to integrate in the Firefox Sync
+ * framework, so this is something of a stub.
+ */
+export function ExtensionStorageEngineKinto(service) {
+ SyncEngine.call(this, "Extension-Storage", service);
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_skipPercentageChance",
+ "services.sync.extension-storage.skipPercentageChance",
+ 0
+ );
+}
+
+ExtensionStorageEngineKinto.prototype = {
+ _trackerObj: ExtensionStorageTracker,
+ // we don't need these since we implement our own sync logic
+ _storeObj: undefined,
+ _recordObj: undefined,
+
+ syncPriority: 10,
+ allowSkippedRecord: false,
+
+ async _sync() {
+ return lazy.extensionStorageSyncKinto.syncAll();
+ },
+
+ get enabled() {
+ return getEngineEnabled();
+ },
+ // We only need the enabled setter for the edge-case where info/collections
+ // has `extension-storage` - which could happen if the pref to flip the new
+ // engine on was once set but no longer is.
+ set enabled(enabled) {
+ setEngineEnabled(enabled);
+ },
+
+ _wipeClient() {
+ return lazy.extensionStorageSyncKinto.clearAll();
+ },
+
+ shouldSkipSync(syncReason) {
+ if (syncReason == "user" || syncReason == "startup") {
+ this._log.info(
+ `Not skipping extension storage sync: reason == ${syncReason}`
+ );
+ // Always sync if a user clicks the button, or if we're starting up.
+ return false;
+ }
+ // Ensure this wouldn't cause a resync...
+ if (this._tracker.score >= lazy.MULTI_DEVICE_THRESHOLD) {
+ this._log.info(
+ "Not skipping extension storage sync: Would trigger resync anyway"
+ );
+ return false;
+ }
+
+ let probability = this._skipPercentageChance / 100.0;
+ // Math.random() returns a value in the interval [0, 1), so `>` is correct:
+ // if `probability` is 1 skip every time, and if it's 0, never skip.
+ let shouldSkip = probability > Math.random();
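+    // e.g. (illustrative) skipPercentageChance = 25 gives probability 0.25,
+    // so roughly one in four eligible syncs is skipped.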
+
+ this._log.info(
+ `Skipping extension-storage sync with a chance of ${probability}: ${shouldSkip}`
+ );
+ return shouldSkip;
+ },
+};
+Object.setPrototypeOf(
+ ExtensionStorageEngineKinto.prototype,
+ SyncEngine.prototype
+);
+
+function ExtensionStorageTracker(name, engine) {
+ Tracker.call(this, name, engine);
+ this._ignoreAll = false;
+}
+ExtensionStorageTracker.prototype = {
+ get ignoreAll() {
+ return this._ignoreAll;
+ },
+
+ set ignoreAll(value) {
+ this._ignoreAll = value;
+ },
+
+ onStart() {
+ lazy.Svc.Obs.add("ext.storage.sync-changed", this.asyncObserver);
+ },
+
+ onStop() {
+ lazy.Svc.Obs.remove("ext.storage.sync-changed", this.asyncObserver);
+ },
+
+ async observe(subject, topic, data) {
+ if (this.ignoreAll) {
+ return;
+ }
+
+ if (topic !== "ext.storage.sync-changed") {
+ return;
+ }
+
+ // Single adds, removes and changes are not so important on their
+ // own, so let's just increment score a bit.
+ this.score += lazy.SCORE_INCREMENT_MEDIUM;
+ },
+};
+Object.setPrototypeOf(ExtensionStorageTracker.prototype, Tracker.prototype);
diff --git a/services/sync/modules/engines/forms.sys.mjs b/services/sync/modules/engines/forms.sys.mjs
new file mode 100644
index 0000000000..3516327659
--- /dev/null
+++ b/services/sync/modules/engines/forms.sys.mjs
@@ -0,0 +1,298 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import {
+ Store,
+ SyncEngine,
+ LegacyTracker,
+} from "resource://services-sync/engines.sys.mjs";
+
+import { CryptoWrapper } from "resource://services-sync/record.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import { SCORE_INCREMENT_MEDIUM } from "resource://services-sync/constants.sys.mjs";
+import {
+ CollectionProblemData,
+ CollectionValidator,
+} from "resource://services-sync/collection_validator.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+const lazy = {};
+ChromeUtils.defineESModuleGetters(lazy, {
+ FormHistory: "resource://gre/modules/FormHistory.sys.mjs",
+});
+
+const FORMS_TTL = 3 * 365 * 24 * 60 * 60; // Three years in seconds.
+
+export function FormRec(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+
+FormRec.prototype = {
+ _logName: "Sync.Record.Form",
+ ttl: FORMS_TTL,
+};
+Object.setPrototypeOf(FormRec.prototype, CryptoWrapper.prototype);
+
+Utils.deferGetSet(FormRec, "cleartext", ["name", "value"]);
+
+var FormWrapper = {
+ _log: Log.repository.getLogger("Sync.Engine.Forms"),
+
+ _getEntryCols: ["fieldname", "value"],
+ _guidCols: ["guid"],
+
+ _search(terms, searchData) {
+ return lazy.FormHistory.search(terms, searchData);
+ },
+
+ async _update(changes) {
+ if (!lazy.FormHistory.enabled) {
+ return; // update isn't going to do anything.
+ }
+ await lazy.FormHistory.update(changes).catch(console.error);
+ },
+
+ async getEntry(guid) {
+ let results = await this._search(this._getEntryCols, { guid });
+ if (!results.length) {
+ return null;
+ }
+ return { name: results[0].fieldname, value: results[0].value };
+ },
+
+ async getGUID(name, value) {
+ // Query for the provided entry.
+ let query = { fieldname: name, value };
+ let results = await this._search(this._guidCols, query);
+ return results.length ? results[0].guid : null;
+ },
+
+ async hasGUID(guid) {
+ // We could probably use a count function here, but search exists...
+ let results = await this._search(this._guidCols, { guid });
+ return !!results.length;
+ },
+
+ async replaceGUID(oldGUID, newGUID) {
+ let changes = {
+ op: "update",
+ guid: oldGUID,
+ newGuid: newGUID,
+ };
+ await this._update(changes);
+ },
+};
+
+export function FormEngine(service) {
+ SyncEngine.call(this, "Forms", service);
+}
+
+FormEngine.prototype = {
+ _storeObj: FormStore,
+ _trackerObj: FormTracker,
+ _recordObj: FormRec,
+
+ syncPriority: 6,
+
+ get prefName() {
+ return "history";
+ },
+
+ async _findDupe(item) {
+ return FormWrapper.getGUID(item.name, item.value);
+ },
+};
+Object.setPrototypeOf(FormEngine.prototype, SyncEngine.prototype);
+
+function FormStore(name, engine) {
+ Store.call(this, name, engine);
+}
+FormStore.prototype = {
+ async _processChange(change) {
+ // If this._changes is defined, then we are applying a batch, so we
+ // can defer it.
+ if (this._changes) {
+ this._changes.push(change);
+ return;
+ }
+
+ // Otherwise we must handle the change right now.
+ await FormWrapper._update(change);
+ },
+
+ async applyIncomingBatch(records, countTelemetry) {
+ Async.checkAppReady();
+ // We collect all the changes to be made then apply them all at once.
+ this._changes = [];
+ let failures = await Store.prototype.applyIncomingBatch.call(
+ this,
+ records,
+ countTelemetry
+ );
+ if (this._changes.length) {
+ await FormWrapper._update(this._changes);
+ }
+ delete this._changes;
+ return failures;
+ },
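+  // (Illustrative) the batched payload handed to FormWrapper._update() is an
+  // array of plain change objects mirroring create()/remove() below, e.g.
+  //   [{ op: "add", guid: "guidA", fieldname: "email", value: "a@b.c" },
+  //    { op: "remove", guid: "guidB" }]
+  // with guidA/guidB standing in for real GUIDs.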
+
+ async getAllIDs() {
+ let results = await FormWrapper._search(["guid"], []);
+ let guids = {};
+ for (let result of results) {
+ guids[result.guid] = true;
+ }
+ return guids;
+ },
+
+ async changeItemID(oldID, newID) {
+ await FormWrapper.replaceGUID(oldID, newID);
+ },
+
+ async itemExists(id) {
+ return FormWrapper.hasGUID(id);
+ },
+
+ async createRecord(id, collection) {
+ let record = new FormRec(collection, id);
+ let entry = await FormWrapper.getEntry(id);
+ if (entry != null) {
+ record.name = entry.name;
+ record.value = entry.value;
+ } else {
+ record.deleted = true;
+ }
+ return record;
+ },
+
+ async create(record) {
+ this._log.trace("Adding form record for " + record.name);
+ let change = {
+ op: "add",
+ guid: record.id,
+ fieldname: record.name,
+ value: record.value,
+ };
+ await this._processChange(change);
+ },
+
+ async remove(record) {
+ this._log.trace("Removing form record: " + record.id);
+ let change = {
+ op: "remove",
+ guid: record.id,
+ };
+ await this._processChange(change);
+ },
+
+ async update(record) {
+ this._log.trace("Ignoring form record update request!");
+ },
+
+ async wipe() {
+ let change = {
+ op: "remove",
+ };
+ await FormWrapper._update(change);
+ },
+};
+Object.setPrototypeOf(FormStore.prototype, Store.prototype);
+
+function FormTracker(name, engine) {
+ LegacyTracker.call(this, name, engine);
+}
+FormTracker.prototype = {
+ QueryInterface: ChromeUtils.generateQI([
+ "nsIObserver",
+ "nsISupportsWeakReference",
+ ]),
+
+ onStart() {
+ Svc.Obs.add("satchel-storage-changed", this.asyncObserver);
+ },
+
+ onStop() {
+ Svc.Obs.remove("satchel-storage-changed", this.asyncObserver);
+ },
+
+ async observe(subject, topic, data) {
+ if (this.ignoreAll) {
+ return;
+ }
+ switch (topic) {
+ case "satchel-storage-changed":
+ if (data == "formhistory-add" || data == "formhistory-remove") {
+ let guid = subject.QueryInterface(Ci.nsISupportsString).toString();
+ await this.trackEntry(guid);
+ }
+ break;
+ }
+ },
+
+ async trackEntry(guid) {
+ const added = await this.addChangedID(guid);
+ if (added) {
+ this.score += SCORE_INCREMENT_MEDIUM;
+ }
+ },
+};
+Object.setPrototypeOf(FormTracker.prototype, LegacyTracker.prototype);
+
+class FormsProblemData extends CollectionProblemData {
+ getSummary() {
+ // We don't support syncing deleted form data, so "clientMissing" isn't a problem
+ return super.getSummary().filter(entry => entry.name !== "clientMissing");
+ }
+}
+
+export class FormValidator extends CollectionValidator {
+ constructor() {
+ super("forms", "id", ["name", "value"]);
+ this.ignoresMissingClients = true;
+ }
+
+ emptyProblemData() {
+ return new FormsProblemData();
+ }
+
+ async getClientItems() {
+ return FormWrapper._search(["guid", "fieldname", "value"], {});
+ }
+
+ normalizeClientItem(item) {
+ return {
+ id: item.guid,
+ guid: item.guid,
+ name: item.fieldname,
+ fieldname: item.fieldname,
+ value: item.value,
+ original: item,
+ };
+ }
+
+ async normalizeServerItem(item) {
+ let res = Object.assign(
+ {
+ guid: item.id,
+ fieldname: item.name,
+ original: item,
+ },
+ item
+ );
+ // Missing `name` or `value` causes the getGUID call to throw
+ if (item.name !== undefined && item.value !== undefined) {
+ let guid = await FormWrapper.getGUID(item.name, item.value);
+ if (guid) {
+ res.guid = guid;
+ res.id = guid;
+ res.duped = true;
+ }
+ }
+
+ return res;
+ }
+}
diff --git a/services/sync/modules/engines/history.sys.mjs b/services/sync/modules/engines/history.sys.mjs
new file mode 100644
index 0000000000..491e7c8a89
--- /dev/null
+++ b/services/sync/modules/engines/history.sys.mjs
@@ -0,0 +1,585 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const HISTORY_TTL = 5184000; // 60 days in seconds
+const THIRTY_DAYS_IN_MS = 2592000000; // 30 days in milliseconds
+
+import { Async } from "resource://services-common/async.sys.mjs";
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+
+import {
+ MAX_HISTORY_DOWNLOAD,
+ MAX_HISTORY_UPLOAD,
+ SCORE_INCREMENT_SMALL,
+ SCORE_INCREMENT_XLARGE,
+} from "resource://services-sync/constants.sys.mjs";
+
+import {
+ Store,
+ SyncEngine,
+ LegacyTracker,
+} from "resource://services-sync/engines.sys.mjs";
+import { CryptoWrapper } from "resource://services-sync/record.sys.mjs";
+import { Utils } from "resource://services-sync/util.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ PlacesSyncUtils: "resource://gre/modules/PlacesSyncUtils.sys.mjs",
+ PlacesUtils: "resource://gre/modules/PlacesUtils.sys.mjs",
+});
+
+export function HistoryRec(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+
+HistoryRec.prototype = {
+ _logName: "Sync.Record.History",
+ ttl: HISTORY_TTL,
+};
+Object.setPrototypeOf(HistoryRec.prototype, CryptoWrapper.prototype);
+
+Utils.deferGetSet(HistoryRec, "cleartext", ["histUri", "title", "visits"]);
+
+export function HistoryEngine(service) {
+ SyncEngine.call(this, "History", service);
+}
+
+HistoryEngine.prototype = {
+ _recordObj: HistoryRec,
+ _storeObj: HistoryStore,
+ _trackerObj: HistoryTracker,
+ downloadLimit: MAX_HISTORY_DOWNLOAD,
+
+ syncPriority: 7,
+
+ async getSyncID() {
+ return lazy.PlacesSyncUtils.history.getSyncId();
+ },
+
+ async ensureCurrentSyncID(newSyncID) {
+ this._log.debug(
+ "Checking if server sync ID ${newSyncID} matches existing",
+ { newSyncID }
+ );
+ await lazy.PlacesSyncUtils.history.ensureCurrentSyncId(newSyncID);
+ return newSyncID;
+ },
+
+ async resetSyncID() {
+ // First, delete the collection on the server. It's fine if we're
+ // interrupted here: on the next sync, we'll detect that our old sync ID is
+ // now stale, and start over as a first sync.
+ await this._deleteServerCollection();
+ // Then, reset our local sync ID.
+ return this.resetLocalSyncID();
+ },
+
+ async resetLocalSyncID() {
+ let newSyncID = await lazy.PlacesSyncUtils.history.resetSyncId();
+ this._log.debug("Assigned new sync ID ${newSyncID}", { newSyncID });
+ return newSyncID;
+ },
+
+ async getLastSync() {
+ let lastSync = await lazy.PlacesSyncUtils.history.getLastSync();
+ return lastSync;
+ },
+
+ async setLastSync(lastSync) {
+ await lazy.PlacesSyncUtils.history.setLastSync(lastSync);
+ },
+
+ shouldSyncURL(url) {
+ return !url.startsWith("file:");
+ },
+
+ async pullNewChanges() {
+ const changedIDs = await this._tracker.getChangedIDs();
+ let modifiedGUIDs = Object.keys(changedIDs);
+ if (!modifiedGUIDs.length) {
+ return {};
+ }
+
+ let guidsToRemove =
+ await lazy.PlacesSyncUtils.history.determineNonSyncableGuids(
+ modifiedGUIDs
+ );
+ await this._tracker.removeChangedID(...guidsToRemove);
+ return changedIDs;
+ },
+
+ async _resetClient() {
+ await super._resetClient();
+ await lazy.PlacesSyncUtils.history.reset();
+ },
+};
+Object.setPrototypeOf(HistoryEngine.prototype, SyncEngine.prototype);
+
+function HistoryStore(name, engine) {
+ Store.call(this, name, engine);
+}
+
+HistoryStore.prototype = {
+ // We try and only update this many visits at one time.
+ MAX_VISITS_PER_INSERT: 500,
+
+ // Some helper functions to handle GUIDs
+ async setGUID(uri, guid) {
+ if (!guid) {
+ guid = Utils.makeGUID();
+ }
+
+ try {
+ await lazy.PlacesSyncUtils.history.changeGuid(uri, guid);
+ } catch (e) {
+ this._log.error("Error setting GUID ${guid} for URI ${uri}", guid, uri);
+ }
+
+ return guid;
+ },
+
+ async GUIDForUri(uri, create) {
+ // Use the existing GUID if it exists
+ let guid;
+ try {
+ guid = await lazy.PlacesSyncUtils.history.fetchGuidForURL(uri);
+ } catch (e) {
+ this._log.error("Error fetching GUID for URL ${uri}", uri);
+ }
+
+ // If the URI has an existing GUID, return it.
+ if (guid) {
+ return guid;
+ }
+
+ // If the URI doesn't have a GUID and we were indicated to create one.
+ if (create) {
+ return this.setGUID(uri);
+ }
+
+ // If the URI doesn't have a GUID and we didn't create one for it.
+ return null;
+ },
+
+ async changeItemID(oldID, newID) {
+ let info = await lazy.PlacesSyncUtils.history.fetchURLInfoForGuid(oldID);
+ if (!info) {
+ throw new Error(`Can't change ID for nonexistent history entry ${oldID}`);
+ }
+ this.setGUID(info.url, newID);
+ },
+
+ async getAllIDs() {
+ let urls = await lazy.PlacesSyncUtils.history.getAllURLs({
+ since: new Date(Date.now() - THIRTY_DAYS_IN_MS),
+ limit: MAX_HISTORY_UPLOAD,
+ });
+
+ let urlsByGUID = {};
+ for (let url of urls) {
+ if (!this.engine.shouldSyncURL(url)) {
+ continue;
+ }
+ let guid = await this.GUIDForUri(url, true);
+ urlsByGUID[guid] = url;
+ }
+ return urlsByGUID;
+ },
+
+ async applyIncomingBatch(records, countTelemetry) {
+ // Convert incoming records to mozIPlaceInfo objects which are applied as
+ // either history additions or removals.
+ let failed = [];
+ let toAdd = [];
+ let toRemove = [];
+ await Async.yieldingForEach(records, async record => {
+ if (record.deleted) {
+ toRemove.push(record);
+ } else {
+ try {
+ let pageInfo = await this._recordToPlaceInfo(record);
+ if (pageInfo) {
+ toAdd.push(pageInfo);
+ }
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.error("Failed to create a place info", ex);
+ this._log.trace("The record that failed", record);
+ failed.push(record.id);
+ countTelemetry.addIncomingFailedReason(ex.message);
+ }
+ }
+ });
+ if (toAdd.length || toRemove.length) {
+ if (toRemove.length) {
+ // PlacesUtils.history.remove takes an array of visits to remove,
+ // but the error semantics are tricky - a single "bad" entry will cause
+ // an exception before anything is removed. So we do remove them one at
+ // a time.
+ await Async.yieldingForEach(toRemove, async record => {
+ try {
+ await this.remove(record);
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.error("Failed to delete a place info", ex);
+ this._log.trace("The record that failed", record);
+ failed.push(record.id);
+ countTelemetry.addIncomingFailedReason(ex.message);
+ }
+ });
+ }
+ for (let chunk of this._generateChunks(toAdd)) {
+ // Per bug 1415560, we ignore any exceptions returned by insertMany
+ // as they are likely to be spurious. We do supply an onError handler
+ // and log the exceptions seen there as they are likely to be
+ // informative, but we still never abort the sync based on them.
+ try {
+ await lazy.PlacesUtils.history.insertMany(
+ chunk,
+ null,
+ failedVisit => {
+ this._log.info(
+ "Failed to insert a history record",
+ failedVisit.guid
+ );
+ this._log.trace("The record that failed", failedVisit);
+ failed.push(failedVisit.guid);
+ }
+ );
+ } catch (ex) {
+ this._log.info("Failed to insert history records", ex);
+ countTelemetry.addIncomingFailedReason(ex.message);
+ }
+ }
+ }
+
+ return failed;
+ },
+
+ /**
+ * Returns a generator that splits records into sanely sized chunks suitable
+ * for passing to places to prevent places doing bad things at shutdown.
+ */
+ *_generateChunks(records) {
+ // We chunk based on the number of *visits* inside each record. However,
+ // we do not split a single record into multiple records, because at some
+ // time in the future, we intend to ensure these records are ordered by
+ // lastModified, and advance the engine's timestamp as we process them,
+ // meaning we can resume exactly where we left off next sync - although
+ // currently that's not done, so we will retry the entire batch next sync
+ // if interrupted.
+ // ie, this means that if a single record has more than MAX_VISITS_PER_INSERT
+ // visits, we will call insertMany() with exactly 1 record, but with
+ // more than MAX_VISITS_PER_INSERT visits.
+ let curIndex = 0;
+ this._log.debug(`adding ${records.length} records to history`);
+ while (curIndex < records.length) {
+ Async.checkAppReady(); // may throw if we are shutting down.
+ let toAdd = []; // what we are going to insert.
+ let count = 0; // a counter which tells us when toAdd is full.
+ do {
+ let record = records[curIndex];
+ curIndex += 1;
+ toAdd.push(record);
+ count += record.visits.length;
+ } while (
+ curIndex < records.length &&
+ count + records[curIndex].visits.length <= this.MAX_VISITS_PER_INSERT
+ );
+ this._log.trace(`adding ${toAdd.length} items in this chunk`);
+ yield toAdd;
+ }
+ },
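+  // Worked example (illustrative): with MAX_VISITS_PER_INSERT = 500 and
+  // records carrying [200, 200, 600] visits, the generator yields [r0, r1]
+  // (400 visits) and then [r2] alone; a single record may exceed the limit,
+  // but it is never split.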
+
+ /* An internal helper to determine if we can add an entry to places.
+ Exists primarily so tests can override it.
+ */
+ _canAddURI(uri) {
+ return lazy.PlacesUtils.history.canAddURI(uri);
+ },
+
+ /**
+ * Converts a Sync history record to a mozIPlaceInfo.
+ *
+ * Throws if an invalid record is encountered (invalid URI, etc.),
+ * returns a new PageInfo object if the record is to be applied, null
+ * otherwise (no visits to add, etc.),
+ */
+ async _recordToPlaceInfo(record) {
+ // Sort out invalid URIs and ones Places just simply doesn't want.
+ record.url = lazy.PlacesUtils.normalizeToURLOrGUID(record.histUri);
+ record.uri = CommonUtils.makeURI(record.histUri);
+
+ if (!Utils.checkGUID(record.id)) {
+ this._log.warn("Encountered record with invalid GUID: " + record.id);
+ return null;
+ }
+ record.guid = record.id;
+
+ if (
+ !this._canAddURI(record.uri) ||
+ !this.engine.shouldSyncURL(record.uri.spec)
+ ) {
+ this._log.trace(
+ "Ignoring record " +
+ record.id +
+ " with URI " +
+ record.uri.spec +
+ ": can't add this URI."
+ );
+ return null;
+ }
+
+ // We dupe visits by date and type. So an incoming visit that has
+ // the same timestamp and type as a local one won't get applied.
+ // To avoid creating new objects, we rewrite the query result so we
+ // can simply check for containment below.
+ let curVisitsAsArray = [];
+ let curVisits = new Set();
+ try {
+ curVisitsAsArray = await lazy.PlacesSyncUtils.history.fetchVisitsForURL(
+ record.histUri
+ );
+ } catch (e) {
+      this._log.error("Error while fetching visits for URL ${histUri}", {
+        histUri: record.histUri,
+      });
+ }
+ let oldestAllowed =
+ lazy.PlacesSyncUtils.bookmarks.EARLIEST_BOOKMARK_TIMESTAMP;
+ if (curVisitsAsArray.length == 20) {
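+      // An exact count of 20 suggests fetchVisitsForURL hit its cap (it
+      // appears to return at most the 20 most recent visits), meaning older
+      // local visits may exist that we can't see; clamp to the oldest one
+      // fetched.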
+ let oldestVisit = curVisitsAsArray[curVisitsAsArray.length - 1];
+      oldestAllowed = lazy.PlacesSyncUtils.history
+        .clampVisitDate(lazy.PlacesUtils.toDate(oldestVisit.date))
+        .getTime();
+ }
+
+ let i, k;
+ for (i = 0; i < curVisitsAsArray.length; i++) {
+ // Same logic as used in the loop below to generate visitKey.
+ let { date, type } = curVisitsAsArray[i];
+ let dateObj = lazy.PlacesUtils.toDate(date);
+ let millis = lazy.PlacesSyncUtils.history
+ .clampVisitDate(dateObj)
+ .getTime();
+ curVisits.add(`${millis},${type}`);
+ }
+
+ // Walk through the visits, make sure we have sound data, and eliminate
+ // dupes. The latter is done by rewriting the array in-place.
+ for (i = 0, k = 0; i < record.visits.length; i++) {
+ let visit = (record.visits[k] = record.visits[i]);
+
+ if (
+ !visit.date ||
+ typeof visit.date != "number" ||
+ !Number.isInteger(visit.date)
+ ) {
+ this._log.warn(
+ "Encountered record with invalid visit date: " + visit.date
+ );
+ continue;
+ }
+
+ if (
+ !visit.type ||
+ !Object.values(lazy.PlacesUtils.history.TRANSITIONS).includes(
+ visit.type
+ )
+ ) {
+ this._log.warn(
+ "Encountered record with invalid visit type: " +
+ visit.type +
+ "; ignoring."
+ );
+ continue;
+ }
+
+ // Dates need to be integers. Future and far past dates are clamped to the
+ // current date and earliest sensible date, respectively.
+ let originalVisitDate = lazy.PlacesUtils.toDate(Math.round(visit.date));
+ visit.date =
+ lazy.PlacesSyncUtils.history.clampVisitDate(originalVisitDate);
+
+ if (visit.date.getTime() < oldestAllowed) {
+ // Visit is older than the oldest visit we have, and we have so many
+ // visits for this uri that we hit our limit when inserting.
+ continue;
+ }
+ let visitKey = `${visit.date.getTime()},${visit.type}`;
+ if (curVisits.has(visitKey)) {
+ // Visit is a dupe, don't increment 'k' so the element will be
+ // overwritten.
+ continue;
+ }
+
+ // Note the visit key, so that we don't add duplicate visits with
+ // clamped timestamps.
+ curVisits.add(visitKey);
+
+ visit.transition = visit.type;
+ k += 1;
+ }
+ record.visits.length = k; // truncate array
+
+ // No update if there aren't any visits to apply.
+ // History wants at least one visit.
+ // In any case, the only thing we could change would be the title
+ // and that shouldn't change without a visit.
+ if (!record.visits.length) {
+ this._log.trace(
+ "Ignoring record " +
+ record.id +
+ " with URI " +
+ record.uri.spec +
+ ": no visits to add."
+ );
+ return null;
+ }
+
+ // PageInfo is validated using validateItemProperties which does a shallow
+ // copy of the properties. Since record uses getters some of the properties
+ // are not copied over. Thus we create and return a new object.
+ let pageInfo = {
+ title: record.title,
+ url: record.url,
+ guid: record.guid,
+ visits: record.visits,
+ };
+
+ return pageInfo;
+ },
+
+ async remove(record) {
+ this._log.trace("Removing page: " + record.id);
+ let removed = await lazy.PlacesUtils.history.remove(record.id);
+ if (removed) {
+ this._log.trace("Removed page: " + record.id);
+ } else {
+ this._log.debug("Page already removed: " + record.id);
+ }
+ },
+
+ async itemExists(id) {
+ return !!(await lazy.PlacesSyncUtils.history.fetchURLInfoForGuid(id));
+ },
+
+ async createRecord(id, collection) {
+    let urlInfo = await lazy.PlacesSyncUtils.history.fetchURLInfoForGuid(id);
+    let record = new HistoryRec(collection, id);
+    if (urlInfo) {
+      record.histUri = urlInfo.url;
+      record.title = urlInfo.title;
+      record.sortindex = urlInfo.frecency;
+ try {
+ record.visits = await lazy.PlacesSyncUtils.history.fetchVisitsForURL(
+ record.histUri
+ );
+ } catch (e) {
+        this._log.error("Error while fetching visits for URL ${histUri}", {
+          histUri: record.histUri,
+        });
+ record.visits = [];
+ }
+ } else {
+ record.deleted = true;
+ }
+
+ return record;
+ },
+
+ async wipe() {
+ return lazy.PlacesSyncUtils.history.wipe();
+ },
+};
+Object.setPrototypeOf(HistoryStore.prototype, Store.prototype);
+
+function HistoryTracker(name, engine) {
+ LegacyTracker.call(this, name, engine);
+}
+HistoryTracker.prototype = {
+ onStart() {
+ this._log.info("Adding Places observer.");
+ this._placesObserver = new PlacesWeakCallbackWrapper(
+ this.handlePlacesEvents.bind(this)
+ );
+ PlacesObservers.addListener(
+ ["page-visited", "history-cleared", "page-removed"],
+ this._placesObserver
+ );
+ },
+
+ onStop() {
+ this._log.info("Removing Places observer.");
+ if (this._placesObserver) {
+ PlacesObservers.removeListener(
+ ["page-visited", "history-cleared", "page-removed"],
+ this._placesObserver
+ );
+ }
+ },
+
+ QueryInterface: ChromeUtils.generateQI(["nsISupportsWeakReference"]),
+
+ handlePlacesEvents(aEvents) {
+ this.asyncObserver.enqueueCall(() => this._handlePlacesEvents(aEvents));
+ },
+
+ async _handlePlacesEvents(aEvents) {
+ if (this.ignoreAll) {
+ this._log.trace(
+ "ignoreAll: ignoring visits [" +
+ aEvents.map(v => v.guid).join(",") +
+ "]"
+ );
+ return;
+ }
+ for (let event of aEvents) {
+ switch (event.type) {
+ case "page-visited": {
+ this._log.trace("'page-visited': " + event.url);
+ if (
+ this.engine.shouldSyncURL(event.url) &&
+ (await this.addChangedID(event.pageGuid))
+ ) {
+ this.score += SCORE_INCREMENT_SMALL;
+ }
+ break;
+ }
+ case "history-cleared": {
+ this._log.trace("history-cleared");
+ // Note that we're going to trigger a sync, but none of the cleared
+ // pages are tracked, so the deletions will not be propagated.
+ // See Bug 578694.
+ this.score += SCORE_INCREMENT_XLARGE;
+ break;
+ }
+ case "page-removed": {
+          if (event.reason === PlacesVisitRemoved.REASON_EXPIRED) {
+            // Expired visits aren't interesting; skip this event rather than
+            // aborting the whole batch.
+            continue;
+          }
+
+ this._log.trace(
+ "page-removed: " + event.url + ", reason " + event.reason
+ );
+ const added = await this.addChangedID(event.pageGuid);
+ if (added) {
+ this.score += event.isRemovedFromStore
+ ? SCORE_INCREMENT_XLARGE
+ : SCORE_INCREMENT_SMALL;
+ }
+ break;
+ }
+ }
+ }
+ },
+};
+Object.setPrototypeOf(HistoryTracker.prototype, LegacyTracker.prototype);
diff --git a/services/sync/modules/engines/passwords.sys.mjs b/services/sync/modules/engines/passwords.sys.mjs
new file mode 100644
index 0000000000..8f24d7333a
--- /dev/null
+++ b/services/sync/modules/engines/passwords.sys.mjs
@@ -0,0 +1,565 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import {
+ Collection,
+ CryptoWrapper,
+} from "resource://services-sync/record.sys.mjs";
+
+import { SCORE_INCREMENT_XLARGE } from "resource://services-sync/constants.sys.mjs";
+import { CollectionValidator } from "resource://services-sync/collection_validator.sys.mjs";
+import {
+ Store,
+ SyncEngine,
+ LegacyTracker,
+} from "resource://services-sync/engines.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+
+// These are valid fields the server could have for a logins record
+// we mainly use this to detect if there are any unknownFields and
+// store (but don't process) those fields to roundtrip them back
+const VALID_LOGIN_FIELDS = [
+ "id",
+ "displayOrigin",
+ "formSubmitURL",
+ "formActionOrigin",
+ "httpRealm",
+ "hostname",
+ "origin",
+ "password",
+ "passwordField",
+ "timeCreated",
+ "timeLastUsed",
+ "timePasswordChanged",
+ "timesUsed",
+ "username",
+ "usernameField",
+ "unknownFields",
+];
+
+const SYNCABLE_LOGIN_FIELDS = [
+ // `nsILoginInfo` fields.
+ "hostname",
+ "formSubmitURL",
+ "httpRealm",
+ "username",
+ "password",
+ "usernameField",
+ "passwordField",
+
+ // `nsILoginMetaInfo` fields.
+ "timeCreated",
+ "timePasswordChanged",
+];
+
+// Compares two logins to determine if their syncable fields changed. The login
+// manager fires `modifyLogin` for changes to all fields, including ones we
+// don't sync. In particular, `timeLastUsed` changes shouldn't mark the login
+// for upload; otherwise, we might overwrite changed passwords before they're
+// downloaded (bug 973166).
+function isSyncableChange(oldLogin, newLogin) {
+ oldLogin.QueryInterface(Ci.nsILoginMetaInfo).QueryInterface(Ci.nsILoginInfo);
+ newLogin.QueryInterface(Ci.nsILoginMetaInfo).QueryInterface(Ci.nsILoginInfo);
+ for (let property of SYNCABLE_LOGIN_FIELDS) {
+ if (oldLogin[property] != newLogin[property]) {
+ return true;
+ }
+ }
+ return false;
+}
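+// (Illustrative) a modifyLogin notification where only `timeLastUsed` changed
+// makes isSyncableChange(oldLogin, newLogin) return false, so merely filling
+// in a saved password never queues the login for upload.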
+
+export function LoginRec(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+
+LoginRec.prototype = {
+ _logName: "Sync.Record.Login",
+
+ cleartextToString() {
+ let o = Object.assign({}, this.cleartext);
+ if (o.password) {
+ o.password = "X".repeat(o.password.length);
+ }
+ return JSON.stringify(o);
+ },
+};
+Object.setPrototypeOf(LoginRec.prototype, CryptoWrapper.prototype);
+
+Utils.deferGetSet(LoginRec, "cleartext", [
+ "hostname",
+ "formSubmitURL",
+ "httpRealm",
+ "username",
+ "password",
+ "usernameField",
+ "passwordField",
+ "timeCreated",
+ "timePasswordChanged",
+]);
+
+export function PasswordEngine(service) {
+ SyncEngine.call(this, "Passwords", service);
+}
+
+PasswordEngine.prototype = {
+ _storeObj: PasswordStore,
+ _trackerObj: PasswordTracker,
+ _recordObj: LoginRec,
+
+ syncPriority: 2,
+
+ // Metadata for syncing is stored in the login manager
+ async ensureCurrentSyncID(newSyncID) {
+ return Services.logins.ensureCurrentSyncID(newSyncID);
+ },
+
+ async getLastSync() {
+ let legacyValue = await super.getLastSync();
+ if (legacyValue) {
+ await this.setLastSync(legacyValue);
+ Svc.Prefs.reset(this.name + ".lastSync");
+ this._log.debug(
+ `migrated timestamp of ${legacyValue} to the logins store`
+ );
+ return legacyValue;
+ }
+ return Services.logins.getLastSync();
+ },
+
+ async setLastSync(timestamp) {
+ await Services.logins.setLastSync(timestamp);
+ },
+
+ async _syncFinish() {
+ await SyncEngine.prototype._syncFinish.call(this);
+
+ // Delete the Weave credentials from the server once.
+ if (!Svc.Prefs.get("deletePwdFxA", false)) {
+ try {
+ let ids = [];
+ for (let host of Utils.getSyncCredentialsHosts()) {
+ for (let info of Services.logins.findLogins(host, "", "")) {
+ ids.push(info.QueryInterface(Ci.nsILoginMetaInfo).guid);
+ }
+ }
+ if (ids.length) {
+ let coll = new Collection(this.engineURL, null, this.service);
+ coll.ids = ids;
+ let ret = await coll.delete();
+ this._log.debug("Delete result: " + ret);
+ if (!ret.success && ret.status != 400) {
+ // A non-400 failure means try again next time.
+ return;
+ }
+ } else {
+ this._log.debug("Didn't find any passwords to delete");
+ }
+ // If there were no ids to delete, or we succeeded, or got a 400,
+ // record success.
+ Svc.Prefs.set("deletePwdFxA", true);
+ Svc.Prefs.reset("deletePwd"); // The old prefname we previously used.
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.debug("Password deletes failed", ex);
+ }
+ }
+ },
+
+ async _findDupe(item) {
+ let login = this._store._nsLoginInfoFromRecord(item);
+ if (!login) {
+ return null;
+ }
+
+ let logins = Services.logins.findLogins(
+ login.origin,
+ login.formActionOrigin,
+ login.httpRealm
+ );
+
+ await Async.promiseYield(); // Yield back to main thread after synchronous operation.
+
+ // Look for existing logins that match the origin, but ignore the password.
+ for (let local of logins) {
+ if (login.matches(local, true) && local instanceof Ci.nsILoginMetaInfo) {
+ return local.guid;
+ }
+ }
+
+ return null;
+ },
+
+ async pullAllChanges() {
+ let changes = {};
+ let ids = await this._store.getAllIDs();
+ for (let [id, info] of Object.entries(ids)) {
+ changes[id] = info.timePasswordChanged / 1000;
+ }
+ return changes;
+ },
+
+ getValidator() {
+ return new PasswordValidator();
+ },
+};
+Object.setPrototypeOf(PasswordEngine.prototype, SyncEngine.prototype);
+
+function PasswordStore(name, engine) {
+ Store.call(this, name, engine);
+ this._nsLoginInfo = new Components.Constructor(
+ "@mozilla.org/login-manager/loginInfo;1",
+ Ci.nsILoginInfo,
+ "init"
+ );
+}
+PasswordStore.prototype = {
+ _newPropertyBag() {
+ return Cc["@mozilla.org/hash-property-bag;1"].createInstance(
+ Ci.nsIWritablePropertyBag2
+ );
+ },
+
+  // Returns a stringified object of any fields not "known" by this client.
+  // Mainly used to prevent data loss for other clients by roundtripping
+  // these fields without processing them.
+ _processUnknownFields(record) {
+ let unknownFields = {};
+ let keys = Object.keys(record);
+ keys
+ .filter(key => !VALID_LOGIN_FIELDS.includes(key))
+ .forEach(key => {
+ unknownFields[key] = record[key];
+ });
+    // If we found some unknown fields, we stringify them so we can
+    // properly encrypt them for roundtripping, since we can't know
+    // whether they contain sensitive data or not.
+ if (Object.keys(unknownFields).length) {
+ return JSON.stringify(unknownFields);
+ }
+ return null;
+ },
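+  // Illustrative example (assuming a hypothetical `futureField` that is not
+  // in VALID_LOGIN_FIELDS): for a cleartext record like
+  //   { hostname: "https://example.com", futureField: "x", ... }
+  // this returns '{"futureField":"x"}', which is stored on the login and
+  // re-attached to the outgoing record in createRecord().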
+
+ /**
+ * Return an instance of nsILoginInfo (and, implicitly, nsILoginMetaInfo).
+ */
+ _nsLoginInfoFromRecord(record) {
+ function nullUndefined(x) {
+ return x == undefined ? null : x;
+ }
+
+ function stringifyNullUndefined(x) {
+ return x == undefined || x == null ? "" : x;
+ }
+
+ if (record.formSubmitURL && record.httpRealm) {
+ this._log.warn(
+ "Record " +
+ record.id +
+ " has both formSubmitURL and httpRealm. Skipping."
+ );
+ return null;
+ }
+
+ // Passing in "undefined" results in an empty string, which later
+ // counts as a value. Explicitly `|| null` these fields according to JS
+ // truthiness. Records with empty strings or null will be unmolested.
+ let info = new this._nsLoginInfo(
+ record.hostname,
+ nullUndefined(record.formSubmitURL),
+ nullUndefined(record.httpRealm),
+ stringifyNullUndefined(record.username),
+ record.password,
+ record.usernameField,
+ record.passwordField
+ );
+
+ info.QueryInterface(Ci.nsILoginMetaInfo);
+ info.guid = record.id;
+ if (record.timeCreated && !isNaN(new Date(record.timeCreated).getTime())) {
+ info.timeCreated = record.timeCreated;
+ }
+ if (
+ record.timePasswordChanged &&
+ !isNaN(new Date(record.timePasswordChanged).getTime())
+ ) {
+ info.timePasswordChanged = record.timePasswordChanged;
+ }
+
+    // Check the record for any unknown fields from other clients
+    // that we want to roundtrip during sync to prevent data loss.
+ let unknownFields = this._processUnknownFields(record.cleartext);
+ if (unknownFields) {
+ info.unknownFields = unknownFields;
+ }
+ return info;
+ },
+
+ async _getLoginFromGUID(id) {
+ let prop = this._newPropertyBag();
+ prop.setPropertyAsAUTF8String("guid", id);
+
+ let logins = Services.logins.searchLogins(prop);
+ await Async.promiseYield(); // Yield back to main thread after synchronous operation.
+
+ if (logins.length) {
+ this._log.trace(logins.length + " items matching " + id + " found.");
+ return logins[0];
+ }
+
+ this._log.trace("No items matching " + id + " found. Ignoring");
+ return null;
+ },
+
+ async getAllIDs() {
+ let items = {};
+ let logins = Services.logins.getAllLogins();
+
+ for (let i = 0; i < logins.length; i++) {
+ // Skip over Weave password/passphrase entries.
+ let metaInfo = logins[i].QueryInterface(Ci.nsILoginMetaInfo);
+ if (Utils.getSyncCredentialsHosts().has(metaInfo.origin)) {
+ continue;
+ }
+
+ items[metaInfo.guid] = metaInfo;
+ }
+
+ return items;
+ },
+
+ async changeItemID(oldID, newID) {
+ this._log.trace("Changing item ID: " + oldID + " to " + newID);
+
+ let oldLogin = await this._getLoginFromGUID(oldID);
+ if (!oldLogin) {
+ this._log.trace("Can't change item ID: item doesn't exist");
+ return;
+ }
+ if (await this._getLoginFromGUID(newID)) {
+ this._log.trace("Can't change item ID: new ID already in use");
+ return;
+ }
+
+ let prop = this._newPropertyBag();
+ prop.setPropertyAsAUTF8String("guid", newID);
+
+ Services.logins.modifyLogin(oldLogin, prop);
+ },
+
+ async itemExists(id) {
+ return !!(await this._getLoginFromGUID(id));
+ },
+
+ async createRecord(id, collection) {
+ let record = new LoginRec(collection, id);
+ let login = await this._getLoginFromGUID(id);
+
+ if (!login) {
+ record.deleted = true;
+ return record;
+ }
+
+ record.hostname = login.origin;
+ record.formSubmitURL = login.formActionOrigin;
+ record.httpRealm = login.httpRealm;
+ record.username = login.username;
+ record.password = login.password;
+ record.usernameField = login.usernameField;
+ record.passwordField = login.passwordField;
+
+ // Optional fields.
+ login.QueryInterface(Ci.nsILoginMetaInfo);
+ record.timeCreated = login.timeCreated;
+ record.timePasswordChanged = login.timePasswordChanged;
+
+    // Put the unknown fields back into the top-level record
+    // during upload.
+ if (login.unknownFields) {
+ let unknownFields = JSON.parse(login.unknownFields);
+ if (unknownFields) {
+ Object.keys(unknownFields).forEach(key => {
+ // We have to manually add it to the cleartext since that's
+ // what gets processed during upload
+ record.cleartext[key] = unknownFields[key];
+ });
+ }
+ }
+
+ return record;
+ },
+
+ async create(record) {
+ let login = this._nsLoginInfoFromRecord(record);
+ if (!login) {
+ return;
+ }
+
+ this._log.trace("Adding login for " + record.hostname);
+ this._log.trace(
+ "httpRealm: " +
+ JSON.stringify(login.httpRealm) +
+ "; " +
+ "formSubmitURL: " +
+ JSON.stringify(login.formActionOrigin)
+ );
+ await Services.logins.addLoginAsync(login);
+ },
+
+ async remove(record) {
+ this._log.trace("Removing login " + record.id);
+
+ let loginItem = await this._getLoginFromGUID(record.id);
+ if (!loginItem) {
+ this._log.trace("Asked to remove record that doesn't exist, ignoring");
+ return;
+ }
+
+ Services.logins.removeLogin(loginItem);
+ },
+
+ async update(record) {
+ let loginItem = await this._getLoginFromGUID(record.id);
+ if (!loginItem) {
+ this._log.trace("Skipping update for unknown item: " + record.hostname);
+ return;
+ }
+
+ this._log.trace("Updating " + record.hostname);
+ let newinfo = this._nsLoginInfoFromRecord(record);
+ if (!newinfo) {
+ return;
+ }
+
+ Services.logins.modifyLogin(loginItem, newinfo);
+ },
+
+ async wipe() {
+ Services.logins.removeAllUserFacingLogins();
+ },
+};
+Object.setPrototypeOf(PasswordStore.prototype, Store.prototype);
+
+function PasswordTracker(name, engine) {
+ LegacyTracker.call(this, name, engine);
+}
+PasswordTracker.prototype = {
+ onStart() {
+ Svc.Obs.add("passwordmgr-storage-changed", this.asyncObserver);
+ },
+
+ onStop() {
+ Svc.Obs.remove("passwordmgr-storage-changed", this.asyncObserver);
+ },
+
+ async observe(subject, topic, data) {
+ if (this.ignoreAll) {
+ return;
+ }
+
+    // A single add, remove, or change, as well as removing all items,
+    // will trigger a sync for MULTI_DEVICE.
+ switch (data) {
+ case "modifyLogin": {
+ subject.QueryInterface(Ci.nsIArrayExtensions);
+ let oldLogin = subject.GetElementAt(0);
+ let newLogin = subject.GetElementAt(1);
+ if (!isSyncableChange(oldLogin, newLogin)) {
+ this._log.trace(`${data}: Ignoring change for ${newLogin.guid}`);
+ break;
+ }
+ const tracked = await this._trackLogin(newLogin);
+ if (tracked) {
+ this._log.trace(`${data}: Tracking change for ${newLogin.guid}`);
+ }
+ break;
+ }
+
+ case "addLogin":
+ case "removeLogin":
+ subject
+ .QueryInterface(Ci.nsILoginMetaInfo)
+ .QueryInterface(Ci.nsILoginInfo);
+ const tracked = await this._trackLogin(subject);
+ if (tracked) {
+ this._log.trace(data + ": " + subject.guid);
+ }
+ break;
+
+ // Bug 1613620: We iterate through the removed logins and track them to ensure
+ // the logins are deleted across synced devices/accounts
+ case "removeAllLogins":
+ subject.QueryInterface(Ci.nsIArrayExtensions);
+ let count = subject.Count();
+ for (let i = 0; i < count; i++) {
+ let currentSubject = subject.GetElementAt(i);
+ let tracked = await this._trackLogin(currentSubject);
+ if (tracked) {
+ this._log.trace(data + ": " + currentSubject.guid);
+ }
+ }
+ this.score += SCORE_INCREMENT_XLARGE;
+ break;
+ }
+ },
+
+ async _trackLogin(login) {
+ if (Utils.getSyncCredentialsHosts().has(login.origin)) {
+ // Skip over Weave password/passphrase changes.
+ return false;
+ }
+ const added = await this.addChangedID(login.guid);
+ if (!added) {
+ return false;
+ }
+ this.score += SCORE_INCREMENT_XLARGE;
+ return true;
+ },
+};
+Object.setPrototypeOf(PasswordTracker.prototype, LegacyTracker.prototype);
+
+export class PasswordValidator extends CollectionValidator {
+ constructor() {
+ super("passwords", "id", [
+ "hostname",
+ "formSubmitURL",
+ "httpRealm",
+ "password",
+ "passwordField",
+ "username",
+ "usernameField",
+ ]);
+ }
+
+ getClientItems() {
+ let logins = Services.logins.getAllLogins();
+ let syncHosts = Utils.getSyncCredentialsHosts();
+ let result = logins
+ .map(l => l.QueryInterface(Ci.nsILoginMetaInfo))
+ .filter(l => !syncHosts.has(l.origin));
+ return Promise.resolve(result);
+ }
+
+ normalizeClientItem(item) {
+ return {
+ id: item.guid,
+ guid: item.guid,
+ hostname: item.hostname,
+ formSubmitURL: item.formSubmitURL,
+ httpRealm: item.httpRealm,
+ password: item.password,
+ passwordField: item.passwordField,
+ username: item.username,
+ usernameField: item.usernameField,
+ original: item,
+ };
+ }
+
+ async normalizeServerItem(item) {
+ return Object.assign({ guid: item.id }, item);
+ }
+}
diff --git a/services/sync/modules/engines/prefs.sys.mjs b/services/sync/modules/engines/prefs.sys.mjs
new file mode 100644
index 0000000000..13d8a63070
--- /dev/null
+++ b/services/sync/modules/engines/prefs.sys.mjs
@@ -0,0 +1,467 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Prefs which start with this prefix are our "control" prefs - they indicate
+// which preferences should be synced.
+const PREF_SYNC_PREFS_PREFIX = "services.sync.prefs.sync.";
+
+// Prefs which have a default value are usually not synced - however, if the
+// preference exists under this prefix and the value is:
+// * `true`, then we do sync default values.
+// * `false`, then as soon as we ever sync a non-default value out, or sync
+// any value in, then we toggle the value to `true`.
+//
+// We never explicitly set this pref back to false, so it's one-shot.
+// Some preferences which are known to have a different default value on
+// different platforms have this preference with a default value of `false`,
+// so they don't sync until one device changes to the non-default value, then
+// that value forever syncs, even if it gets reset back to the default.
+// Note that preferences handled this way *must also* have the "normal"
+// control pref set.
+// A possible future enhancement would be to sync these prefs so that
+// other distributions can flag them if they change the default, but that
+// doesn't seem worthwhile until we can be confident they'd actually create
+// this special control pref at the same time they flip the default.
+const PREF_SYNC_SEEN_PREFIX = "services.sync.prefs.sync-seen.";
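+//
+// Illustrative example (hypothetical pref names, not shipped defaults): a
+// distribution that wants `browser.example.pref` to sync despite differing
+// platform defaults would ship both control prefs:
+//   services.sync.prefs.sync.browser.example.pref = true
+//   services.sync.prefs.sync-seen.browser.example.pref = false
+// The pref then starts syncing once any device sets a non-default value.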
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+import { Preferences } from "resource://gre/modules/Preferences.sys.mjs";
+
+import {
+ Store,
+ SyncEngine,
+ Tracker,
+} from "resource://services-sync/engines.sys.mjs";
+import { CryptoWrapper } from "resource://services-sync/record.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+import { SCORE_INCREMENT_XLARGE } from "resource://services-sync/constants.sys.mjs";
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+
+const lazy = {};
+
+XPCOMUtils.defineLazyGetter(lazy, "PREFS_GUID", () =>
+ CommonUtils.encodeBase64URL(Services.appinfo.ID)
+);
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ AddonManager: "resource://gre/modules/AddonManager.sys.mjs",
+});
+
+// In bug 1538015, we decided that it isn't always safe to allow all "incoming"
+// preferences to be applied locally. So we have introduced another preference,
+// which if false (the default) will ignore all incoming preferences which don't
+// already have the "control" preference locally set. If this new
+// preference is set to true, then we continue our old behavior of allowing all
+// preferences to be updated, even those which don't already have a local
+// "control" pref.
+const PREF_SYNC_PREFS_ARBITRARY =
+ "services.sync.prefs.dangerously_allow_arbitrary";
+
+XPCOMUtils.defineLazyPreferenceGetter(
+ lazy,
+ "ALLOW_ARBITRARY",
+ PREF_SYNC_PREFS_ARBITRARY
+);
+
+// The SUMO-supplied URL we log with more information about how custom prefs
+// can continue to be synced. SUMO has told us that this URL will remain "stable".
+const PREFS_DOC_URL_TEMPLATE =
+ "https://support.mozilla.org/1/firefox/%VERSION%/%OS%/%LOCALE%/sync-custom-preferences";
+XPCOMUtils.defineLazyGetter(lazy, "PREFS_DOC_URL", () =>
+ Services.urlFormatter.formatURL(PREFS_DOC_URL_TEMPLATE)
+);
+
+// Check for a local control pref or PREF_SYNC_PREFS_ARBITRARY
+function isAllowedPrefName(prefName) {
+ if (prefName == PREF_SYNC_PREFS_ARBITRARY) {
+ return false; // never allow this.
+ }
+ if (lazy.ALLOW_ARBITRARY) {
+ // user has set the "dangerous" pref, so everything is allowed.
+ return true;
+ }
+ // The pref must already have a control pref set, although it doesn't matter
+ // here whether that value is true or false. We can't use prefHasUserValue
+  // here because we also want to match prefs that still have default values.
+ try {
+ Services.prefs.getBoolPref(PREF_SYNC_PREFS_PREFIX + prefName);
+ // pref exists!
+ return true;
+ } catch (_) {
+ return false;
+ }
+}
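+// e.g. "browser.example.pref" (hypothetical) is allowed only if
+// "services.sync.prefs.sync.browser.example.pref" exists locally (with any
+// value), or if the "dangerously_allow_arbitrary" escape hatch is true.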
+
+export function PrefRec(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+
+PrefRec.prototype = {
+ _logName: "Sync.Record.Pref",
+};
+Object.setPrototypeOf(PrefRec.prototype, CryptoWrapper.prototype);
+
+Utils.deferGetSet(PrefRec, "cleartext", ["value"]);
+
+export function PrefsEngine(service) {
+ SyncEngine.call(this, "Prefs", service);
+}
+
+PrefsEngine.prototype = {
+ _storeObj: PrefStore,
+ _trackerObj: PrefTracker,
+ _recordObj: PrefRec,
+ version: 2,
+
+ syncPriority: 1,
+ allowSkippedRecord: false,
+
+ async getChangedIDs() {
+ // No need for a proper timestamp (no conflict resolution needed).
+ let changedIDs = {};
+ if (this._tracker.modified) {
+ changedIDs[lazy.PREFS_GUID] = 0;
+ }
+ return changedIDs;
+ },
+
+ async _wipeClient() {
+ await SyncEngine.prototype._wipeClient.call(this);
+ this.justWiped = true;
+ },
+
+ async _reconcile(item) {
+ // Apply the incoming item if we don't care about the local data
+ if (this.justWiped) {
+ this.justWiped = false;
+ return true;
+ }
+ return SyncEngine.prototype._reconcile.call(this, item);
+ },
+
+ async trackRemainingChanges() {
+ if (this._modified.count() > 0) {
+ this._tracker.modified = true;
+ }
+ },
+};
+Object.setPrototypeOf(PrefsEngine.prototype, SyncEngine.prototype);
+
+// We don't use services.sync.engine.tabs.filteredSchemes since it includes
+// about: pages and the like, which we want to be syncable in preferences.
+// Blob, moz-extension, data and file uris are never safe to sync,
+// so we limit our check to those.
+const UNSYNCABLE_URL_REGEXP = /^(moz-extension|blob|data|file):/i;
+function isUnsyncableURLPref(prefName) {
+ if (Services.prefs.getPrefType(prefName) != Ci.nsIPrefBranch.PREF_STRING) {
+ return false;
+ }
+ const prefValue = Services.prefs.getStringPref(prefName, "");
+ return UNSYNCABLE_URL_REGEXP.test(prefValue);
+}
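+// For example, a string pref whose value is "moz-extension://<uuid>/page.html"
+// or "file:///home/user/notes.txt" is treated as unsyncable, while an
+// "https://..." value is fine.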
+
+function PrefStore(name, engine) {
+ Store.call(this, name, engine);
+ Svc.Obs.add(
+ "profile-before-change",
+ function () {
+ this.__prefs = null;
+ },
+ this
+ );
+}
+PrefStore.prototype = {
+ __prefs: null,
+ get _prefs() {
+ if (!this.__prefs) {
+ this.__prefs = new Preferences();
+ }
+ return this.__prefs;
+ },
+
+ _getSyncPrefs() {
+ let syncPrefs = Services.prefs
+ .getBranch(PREF_SYNC_PREFS_PREFIX)
+ .getChildList("")
+ .filter(pref => isAllowedPrefName(pref) && !isUnsyncableURLPref(pref));
+ // Also sync preferences that determine which prefs get synced.
+ let controlPrefs = syncPrefs.map(pref => PREF_SYNC_PREFS_PREFIX + pref);
+ return controlPrefs.concat(syncPrefs);
+ },
+
+ _isSynced(pref) {
+ if (pref.startsWith(PREF_SYNC_PREFS_PREFIX)) {
+ // this is an incoming control pref, which is ignored if there's not already
+ // a local control pref for the preference.
+ let controlledPref = pref.slice(PREF_SYNC_PREFS_PREFIX.length);
+ return isAllowedPrefName(controlledPref);
+ }
+
+ // This is the pref itself - it must be both allowed, and have a control
+ // pref which is true.
+ if (!this._prefs.get(PREF_SYNC_PREFS_PREFIX + pref, false)) {
+ return false;
+ }
+ return isAllowedPrefName(pref);
+ },
+
+ _getAllPrefs() {
+ let values = {};
+ for (let pref of this._getSyncPrefs()) {
+      // Note: _isSynced doesn't call isUnsyncableURLPref, since that would
+      // cause us not to apply (syncable) incoming changes to preferences
+      // that are locally set to unsyncable URLs.
+ if (this._isSynced(pref) && !isUnsyncableURLPref(pref)) {
+ let isSet = this._prefs.isSet(pref);
+      // Missing and default prefs get the null value, unless the
+      // corresponding `sync-seen` pref is true, in which case the current
+      // value (even a default one) is used.
+ let forceValue = this._prefs.get(PREF_SYNC_SEEN_PREFIX + pref, false);
+ values[pref] = isSet || forceValue ? this._prefs.get(pref, null) : null;
+      // If the pref is set to a non-default value and its "sync-seen"
+      // pref isn't already true, flip the seen pref to true.
+ if (
+ isSet &&
+ this._prefs.get(PREF_SYNC_SEEN_PREFIX + pref, false) === false
+ ) {
+ this._log.trace(`toggling sync-seen pref for '${pref}' to true`);
+ this._prefs.set(PREF_SYNC_SEEN_PREFIX + pref, true);
+ }
+ }
+ }
+ return values;
+ },
+
+ _setAllPrefs(values) {
+ const selectedThemeIDPref = "extensions.activeThemeID";
+ let selectedThemeIDBefore = this._prefs.get(selectedThemeIDPref, null);
+ let selectedThemeIDAfter = selectedThemeIDBefore;
+
+ // Update 'services.sync.prefs.sync.foo.pref' before 'foo.pref', otherwise
+ // _isSynced returns false when 'foo.pref' doesn't exist (e.g., on a new device).
+ let prefs = Object.keys(values).sort(
+ a => -a.indexOf(PREF_SYNC_PREFS_PREFIX)
+ );
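+    // (The one-argument comparator above returns -0 (== 0) for keys that
+    // start with PREF_SYNC_PREFS_PREFIX and 1 for everything else, so the
+    // control prefs sort ahead of the prefs they control.)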
+ for (let pref of prefs) {
+ let value = values[pref];
+ if (!this._isSynced(pref)) {
+ // An extra complication just so we can warn when we decline to sync a
+ // preference due to no local control pref.
+ if (!pref.startsWith(PREF_SYNC_PREFS_PREFIX)) {
+ // this is an incoming pref - if the incoming value is not null and
+ // there's no local control pref, then it means we would have previously
+ // applied a value, but now will decline to.
+ // We need to check this here rather than in _isSynced because the
+ // default list of prefs we sync has changed, so we don't want to report
+ // this message when we wouldn't have actually applied a value.
+ // We should probably remove all of this in ~ Firefox 80.
+ if (value !== null) {
+ // null means "use the default value"
+ let controlPref = PREF_SYNC_PREFS_PREFIX + pref;
+ let controlPrefExists;
+ try {
+ Services.prefs.getBoolPref(controlPref);
+ controlPrefExists = true;
+ } catch (ex) {
+ controlPrefExists = false;
+ }
+ if (!controlPrefExists) {
+ // This is a long message and written to both the sync log and the
+ // console, but note that users who have not customized the control
+ // prefs will never see this.
+ let msg =
+ `Not syncing the preference '${pref}' because it has no local ` +
+ `control preference (${PREF_SYNC_PREFS_PREFIX}${pref}) and ` +
+ `the preference ${PREF_SYNC_PREFS_ARBITRARY} isn't true. ` +
+ `See ${lazy.PREFS_DOC_URL} for more information`;
+ console.warn(msg);
+ this._log.warn(msg);
+ }
+ }
+ }
+ continue;
+ }
+
+ if (typeof value == "string" && UNSYNCABLE_URL_REGEXP.test(value)) {
+ this._log.trace(`Skipping incoming unsyncable url for pref: ${pref}`);
+ continue;
+ }
+
+ switch (pref) {
+ // Some special prefs we don't want to set directly.
+ case selectedThemeIDPref:
+ selectedThemeIDAfter = value;
+ break;
+
+ // default is to just set the pref
+ default:
+ if (value == null) {
+ // Pref has gone missing. The best we can do is reset it.
+ this._prefs.reset(pref);
+ } else {
+ try {
+ this._prefs.set(pref, value);
+ } catch (ex) {
+ this._log.trace(`Failed to set pref: ${pref}`, ex);
+ }
+ }
+ // If there's a "sync-seen" pref for this it gets toggled to true
+ // regardless of the value.
+ let seenPref = PREF_SYNC_SEEN_PREFIX + pref;
+ if (this._prefs.get(seenPref, undefined) === false) {
+ this._prefs.set(PREF_SYNC_SEEN_PREFIX + pref, true);
+ }
+ }
+ }
+ // Themes are a little messy. Themes which have been installed are handled
+ // by the addons engine - but default themes aren't seen by that engine.
+ // So if there's a new default theme ID and that ID corresponds to a
+ // system addon, then we arrange to enable that addon here.
+ if (selectedThemeIDBefore != selectedThemeIDAfter) {
+ this._maybeEnableBuiltinTheme(selectedThemeIDAfter).catch(e => {
+ this._log.error("Failed to maybe update the default theme", e);
+ });
+ }
+ },
+
+ async _maybeEnableBuiltinTheme(themeId) {
+ let addon = null;
+ try {
+ addon = await lazy.AddonManager.getAddonByID(themeId);
+ } catch (ex) {
+ this._log.trace(
+        `There's no addon with ID '${themeId}' - it can't be a builtin theme`
+ );
+ return;
+ }
+ if (addon && addon.isBuiltin && addon.type == "theme") {
+ this._log.trace(`Enabling builtin theme '${themeId}'`);
+ await addon.enable();
+ } else {
+ this._log.trace(
+ `Have incoming theme ID of '${themeId}' but it's not a builtin theme`
+ );
+ }
+ },
+
+ async getAllIDs() {
+ /* We store all prefs in just one WBO, with just one GUID */
+ let allprefs = {};
+ allprefs[lazy.PREFS_GUID] = true;
+ return allprefs;
+ },
+
+ async changeItemID(oldID, newID) {
+ this._log.trace("PrefStore GUID is constant!");
+ },
+
+ async itemExists(id) {
+ return id === lazy.PREFS_GUID;
+ },
+
+ async createRecord(id, collection) {
+ let record = new PrefRec(collection, id);
+
+ if (id == lazy.PREFS_GUID) {
+ record.value = this._getAllPrefs();
+ } else {
+ record.deleted = true;
+ }
+
+ return record;
+ },
+
+ async create(record) {
+ this._log.trace("Ignoring create request");
+ },
+
+ async remove(record) {
+ this._log.trace("Ignoring remove request");
+ },
+
+ async update(record) {
+ // Silently ignore pref updates that are for other apps.
+ if (record.id != lazy.PREFS_GUID) {
+ return;
+ }
+
+ this._log.trace("Received pref updates, applying...");
+ this._setAllPrefs(record.value);
+ },
+
+ async wipe() {
+ this._log.trace("Ignoring wipe request");
+ },
+};
+Object.setPrototypeOf(PrefStore.prototype, Store.prototype);
+
+function PrefTracker(name, engine) {
+ Tracker.call(this, name, engine);
+ this._ignoreAll = false;
+ Svc.Obs.add("profile-before-change", this.asyncObserver);
+}
+PrefTracker.prototype = {
+ get ignoreAll() {
+ return this._ignoreAll;
+ },
+
+ set ignoreAll(value) {
+ this._ignoreAll = value;
+ },
+
+ get modified() {
+ return Svc.Prefs.get("engine.prefs.modified", false);
+ },
+ set modified(value) {
+ Svc.Prefs.set("engine.prefs.modified", value);
+ },
+
+ clearChangedIDs: function clearChangedIDs() {
+ this.modified = false;
+ },
+
+ __prefs: null,
+ get _prefs() {
+ if (!this.__prefs) {
+ this.__prefs = new Preferences();
+ }
+ return this.__prefs;
+ },
+
+ onStart() {
+ Services.prefs.addObserver("", this.asyncObserver);
+ },
+
+ onStop() {
+ this.__prefs = null;
+ Services.prefs.removeObserver("", this.asyncObserver);
+ },
+
+ async observe(subject, topic, data) {
+ switch (topic) {
+ case "profile-before-change":
+ await this.stop();
+ break;
+ case "nsPref:changed":
+ if (this.ignoreAll) {
+ break;
+ }
+        // Trigger a sync for MULTI_DEVICE for a regular pref change, or for
+        // a change to a control pref that determines which prefs are synced.
+ if (
+ data.indexOf(PREF_SYNC_PREFS_PREFIX) == 0 ||
+ this._prefs.get(PREF_SYNC_PREFS_PREFIX + data, false)
+ ) {
+ this.score += SCORE_INCREMENT_XLARGE;
+ this.modified = true;
+ this._log.trace("Preference " + data + " changed");
+ }
+ break;
+ }
+ },
+};
+Object.setPrototypeOf(PrefTracker.prototype, Tracker.prototype);
+
+export function getPrefsGUIDForTest() {
+ return lazy.PREFS_GUID;
+}
diff --git a/services/sync/modules/engines/tabs.sys.mjs b/services/sync/modules/engines/tabs.sys.mjs
new file mode 100644
index 0000000000..45242b71a7
--- /dev/null
+++ b/services/sync/modules/engines/tabs.sys.mjs
@@ -0,0 +1,624 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const STORAGE_VERSION = 1; // This needs to be kept in sync with the rust storage version
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+import { SyncEngine, Tracker } from "resource://services-sync/engines.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+import {
+ SCORE_INCREMENT_SMALL,
+ STATUS_OK,
+ URI_LENGTH_MAX,
+} from "resource://services-sync/constants.sys.mjs";
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+import { Async } from "resource://services-common/async.sys.mjs";
+import {
+ SyncRecord,
+ SyncTelemetry,
+} from "resource://services-sync/telemetry.sys.mjs";
+import { BridgedEngine } from "resource://services-sync/bridged_engine.sys.mjs";
+
+const FAR_FUTURE = 4102405200000; // 2100/01/01
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ PlacesUtils: "resource://gre/modules/PlacesUtils.sys.mjs",
+ PrivateBrowsingUtils: "resource://gre/modules/PrivateBrowsingUtils.sys.mjs",
+ ReaderMode: "resource://gre/modules/ReaderMode.sys.mjs",
+ TabsStore: "resource://gre/modules/RustTabs.sys.mjs",
+});
+
+XPCOMUtils.defineLazyPreferenceGetter(
+ lazy,
+ "TABS_FILTERED_SCHEMES",
+ "services.sync.engine.tabs.filteredSchemes",
+ "",
+ null,
+ val => {
+ return new Set(val.split("|"));
+ }
+);
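+// e.g. a pref value of "about|resource|chrome" yields
+// new Set(["about", "resource", "chrome"]).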
+
+XPCOMUtils.defineLazyPreferenceGetter(
+ lazy,
+ "SYNC_AFTER_DELAY_MS",
+ "services.sync.syncedTabs.syncDelayAfterTabChange",
+ 0
+);
+
+// A "bridged engine" to our tabs component.
+export function TabEngine(service) {
+ BridgedEngine.call(this, "Tabs", service);
+}
+
+TabEngine.prototype = {
+ _trackerObj: TabTracker,
+ syncPriority: 3,
+
+ async prepareTheBridge(isQuickWrite) {
+ let clientsEngine = this.service.clientsEngine;
+ // Tell the bridged engine about clients.
+ // This is the same shape as ClientData in app-services.
+ // schema: https://github.com/mozilla/application-services/blob/a1168751231ed4e88c44d85f6dccc09c3b412bd2/components/sync15/src/client_types.rs#L14
+ let clientData = {
+ local_client_id: clientsEngine.localID,
+ recent_clients: {},
+ };
+
+ // We shouldn't upload tabs past what the server will accept
+ let tabs = await this.getTabsWithinPayloadSize();
+ await this._rustStore.setLocalTabs(
+ tabs.map(tab => {
+ // rust wants lastUsed in MS but the provider gives it in seconds
+ tab.lastUsed = tab.lastUsed * 1000;
+ return tab;
+ })
+ );
+
+ for (let remoteClient of clientsEngine.remoteClients) {
+ let id = remoteClient.id;
+ if (!id) {
+ throw new Error("Remote client somehow did not have an id");
+ }
+ let client = {
+ fxa_device_id: remoteClient.fxaDeviceId,
+ // device_name and device_type are soft-deprecated - every client
+ // prefers what's in the FxA record. But fill them correctly anyway.
+ device_name: clientsEngine.getClientName(id) ?? "",
+ device_type: clientsEngine.getClientType(id),
+ };
+ clientData.recent_clients[id] = client;
+ }
+
+    // Put ourselves in there too so we record the correct device info in our
+    // sync record.
+ clientData.recent_clients[clientsEngine.localID] = {
+ fxa_device_id: await clientsEngine.fxAccounts.device.getLocalId(),
+ device_name: clientsEngine.localName,
+ device_type: clientsEngine.localType,
+ };
+
+    // Quick write needs to adjust the lastSync so we can POST to the server;
+    // see quickWrite() for details.
+ if (isQuickWrite) {
+ await this.setLastSync(FAR_FUTURE);
+ await this._bridge.prepareForSync(JSON.stringify(clientData));
+ return;
+ }
+
+    // Just in case we crashed while the lastSync timestamp was FAR_FUTURE,
+    // we reset it to zero.
+ if ((await this.getLastSync()) === FAR_FUTURE) {
+ await this._bridge.setLastSync(0);
+ }
+ await this._bridge.prepareForSync(JSON.stringify(clientData));
+ },
+
+ async _syncStartup() {
+ await super._syncStartup();
+ await this.prepareTheBridge();
+ },
+
+ async initialize() {
+ await SyncEngine.prototype.initialize.call(this);
+
+ let path = PathUtils.join(PathUtils.profileDir, "synced-tabs.db");
+ this._rustStore = await lazy.TabsStore.init(path);
+ this._bridge = await this._rustStore.bridgedEngine();
+
+    // Uniffi currently only supports async methods, so we need to hardcode
+    // these values for now (which is fine, as they hardly ever change).
+ this._bridge.storageVersion = STORAGE_VERSION;
+ this._bridge.allowSkippedRecord = true;
+
+ this._log.info("Got a bridged engine!");
+ this._tracker.modified = true;
+ },
+
+ async getChangedIDs() {
+ // No need for a proper timestamp (no conflict resolution needed).
+ let changedIDs = {};
+ if (this._tracker.modified) {
+ changedIDs[this.service.clientsEngine.localID] = 0;
+ }
+ return changedIDs;
+ },
+
+ // API for use by Sync UI code to give user choices of tabs to open.
+ async getAllClients() {
+ let remoteTabs = await this._rustStore.getAll();
+ let remoteClientTabs = [];
+ for (let remoteClient of this.service.clientsEngine.remoteClients) {
+ // We get the some client info from the rust tabs engine and some from
+ // the clients engine.
+ let rustClient = remoteTabs.find(
+ x => x.clientId === remoteClient.fxaDeviceId
+ );
+ if (!rustClient) {
+ continue;
+ }
+ let client = {
+ // rust gives us ms but js uses seconds, so fix them up.
+ tabs: rustClient.remoteTabs.map(tab => {
+ tab.lastUsed = tab.lastUsed / 1000;
+ return tab;
+ }),
+ lastModified: rustClient.lastModified / 1000,
+ ...remoteClient,
+ };
+ remoteClientTabs.push(client);
+ }
+ return remoteClientTabs;
+ },
+
+ async removeClientData() {
+ let url = this.engineURL + "/" + this.service.clientsEngine.localID;
+ await this.service.resource(url).delete();
+ },
+
+ async trackRemainingChanges() {
+ if (this._modified.count() > 0) {
+ this._tracker.modified = true;
+ }
+ },
+
+ async getTabsWithinPayloadSize() {
+ const maxPayloadSize = this.service.getMaxRecordPayloadSize();
+ // See bug 535326 comment 8 for an explanation of the estimation
+ const maxSerializedSize = (maxPayloadSize / 4) * 3 - 1500;
+ return TabProvider.getAllTabsWithEstimatedMax(true, maxSerializedSize);
+ },
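+  // The estimate above accounts for base64 inflation of the encrypted payload
+  // (4 output bytes per 3 input bytes) plus ~1500 bytes of envelope headroom.
+  // With an illustrative 256KiB max payload, that allows
+  //   (262144 / 4) * 3 - 1500 = 195108
+  // bytes of serialized tab data.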
+
+ // Support for "quick writes"
+ _engineLock: Utils.lock,
+ _engineLocked: false,
+
+ // Tabs has a special lock to help support its "quick write"
+ get locked() {
+ return this._engineLocked;
+ },
+ lock() {
+ if (this._engineLocked) {
+ return false;
+ }
+ this._engineLocked = true;
+ return true;
+ },
+ unlock() {
+ this._engineLocked = false;
+ },
+
+ // Quickly do a POST of our current tabs if possible.
+ // This does things that would be dangerous for other engines - eg, posting
+ // without checking what's on the server could cause data-loss for other
+ // engines, but because each device exclusively owns exactly 1 tabs record
+ // with a known ID, it's safe here.
+ // Returns true if we successfully synced, false otherwise (either on error
+ // or because we declined to sync for any reason.) The return value is
+ // primarily for tests.
+ async quickWrite() {
+ if (!this.enabled) {
+ // this should be very rare, and only if tabs are disabled after the
+ // timer is created.
+ this._log.info("Can't do a quick-sync as tabs is disabled");
+ return false;
+ }
+    // This quick-sync doesn't drive the login state correctly, so just
+    // decline to sync if our status is bad.
+ if (this.service.status.checkSetup() != STATUS_OK) {
+ this._log.info(
+ "Can't do a quick-sync due to the service status",
+ this.service.status.toString()
+ );
+ return false;
+ }
+ if (!this.service.serverConfiguration) {
+ this._log.info("Can't do a quick sync before the first full sync");
+ return false;
+ }
+ try {
+ return await this._engineLock("tabs.js: quickWrite", async () => {
+ // We want to restore the lastSync timestamp when complete so next sync
+ // takes tabs written by other devices since our last real sync.
+ // And for this POST we don't want the protections offered by
+ // X-If-Unmodified-Since - we want the POST to work even if the remote
+ // has moved on and we will catch back up next full sync.
+ const origLastSync = await this.getLastSync();
+ try {
+ return this._doQuickWrite();
+ } finally {
+          // Set the lastSync back to its original value for regular syncs.
+ await this.setLastSync(origLastSync);
+ }
+ })();
+ } catch (ex) {
+ if (!Utils.isLockException(ex)) {
+ throw ex;
+ }
+ this._log.info(
+ "Can't do a quick-write as another tab sync is in progress"
+ );
+ return false;
+ }
+ },
+
+ // The guts of the quick-write sync, after we've taken the lock, checked
+ // the service status etc.
+ async _doQuickWrite() {
+ // We need to track telemetry for these syncs too!
+ const name = "tabs";
+ let telemetryRecord = new SyncRecord(
+ SyncTelemetry.allowedEngines,
+ "quick-write"
+ );
+ telemetryRecord.onEngineStart(name);
+ try {
+ Async.checkAppReady();
+ // We need to prep the bridge before we try to POST since it grabs
+ // the most recent local client id and properly sets a lastSync
+ // which is needed for a proper POST request
+ await this.prepareTheBridge(true);
+ this._tracker.clearChangedIDs();
+ this._tracker.resetScore();
+
+ Async.checkAppReady();
+ // now just the "upload" part of a sync,
+ // which for a rust engine is not obvious.
+ // We need to do is ask the rust engine for the changes. Although
+ // this is kinda abusing the bridged-engine interface, we know the tabs
+ // implementation of it works ok
+ let outgoing = await this._bridge.apply();
+ // We know we always have exactly 1 record.
+ let mine = outgoing[0];
+ this._log.trace("outgoing bso", mine);
+ // `this._recordObj` is a `BridgedRecord`, which isn't exported.
+ let record = this._recordObj.fromOutgoingBso(this.name, JSON.parse(mine));
+ let changeset = {};
+ changeset[record.id] = { synced: false, record };
+ this._modified.replace(changeset);
+
+ Async.checkAppReady();
+ await this._uploadOutgoing();
+ telemetryRecord.onEngineStop(name, null);
+ return true;
+ } catch (ex) {
+ this._log.warn("quicksync sync failed", ex);
+ telemetryRecord.onEngineStop(name, ex);
+ return false;
+ } finally {
+ // The top-level sync is never considered to fail here, just the engine
+ telemetryRecord.finished(null);
+ SyncTelemetry.takeTelemetryRecord(telemetryRecord);
+ }
+ },
+
+ async _sync() {
+ try {
+ await this._engineLock("tabs.js: fullSync", async () => {
+ await super._sync();
+ })();
+ } catch (ex) {
+ if (!Utils.isLockException(ex)) {
+ throw ex;
+ }
+ this._log.info(
+ "Can't do full tabs sync as a quick-write is currently running"
+ );
+ }
+ },
+};
+Object.setPrototypeOf(TabEngine.prototype, BridgedEngine.prototype);
+
+export const TabProvider = {
+ getWindowEnumerator() {
+ return Services.wm.getEnumerator("navigator:browser");
+ },
+
+ shouldSkipWindow(win) {
+ return win.closed || lazy.PrivateBrowsingUtils.isWindowPrivate(win);
+ },
+
+ getAllBrowserTabs() {
+ let tabs = [];
+ for (let win of this.getWindowEnumerator()) {
+ if (this.shouldSkipWindow(win)) {
+ continue;
+ }
+ // Get all the tabs from the browser
+ for (let tab of win.gBrowser.tabs) {
+ tabs.push(tab);
+ }
+ }
+
+ return tabs.sort(function (a, b) {
+ return b.lastAccessed - a.lastAccessed;
+ });
+ },
+
+  // This function creates tab records up to a specified number of bytes.
+  // It is an "estimation" since we don't accurately calculate the favicon
+  // and JSON overhead; we use a rough estimate (for optimization purposes).
+ async getAllTabsWithEstimatedMax(filter, bytesMax) {
+ let log = Log.repository.getLogger(`Sync.Engine.Tabs.Provider`);
+ let tabRecords = [];
+ let iconPromises = [];
+ let runningByteLength = 0;
+ let encoder = new TextEncoder();
+
+ // Fetch all the tabs the user has open
+ let winTabs = this.getAllBrowserTabs();
+
+ for (let tab of winTabs) {
+ // We don't want to process any more tabs than we can sync
+ if (runningByteLength >= bytesMax) {
+ log.warn(
+          `Can't fit all tabs in sync payload: have ${winTabs.length}, ` +
+            `but can only fit ${tabRecords.length}.`
+ );
+ break;
+ }
+
+ // Note that we used to sync "tab history" (ie, the "back button") state,
+ // but in practice this hasn't been used - only the current URI is of
+ // interest to clients.
+ // We stopped recording this in bug 1783991.
+ if (!tab?.linkedBrowser) {
+ continue;
+ }
+ let acceptable = !filter
+ ? url => url
+ : url =>
+ url &&
+ !lazy.TABS_FILTERED_SCHEMES.has(Services.io.extractScheme(url));
+
+ let url = tab.linkedBrowser.currentURI?.spec;
+ // Special case for reader mode.
+ if (url && url.startsWith("about:reader?")) {
+ url = lazy.ReaderMode.getOriginalUrl(url);
+ }
+ // We ignore the tab completely if the current entry url is
+ // not acceptable (we need something accurate to open).
+ if (!acceptable(url)) {
+ continue;
+ }
+
+ if (url.length > URI_LENGTH_MAX) {
+ log.trace("Skipping over-long URL.");
+ continue;
+ }
+
+ let thisTab = {
+ title: tab.linkedBrowser.contentTitle || "",
+ urlHistory: [url],
+ icon: "",
+ lastUsed: Math.floor((tab.lastAccessed || 0) / 1000),
+ };
+ tabRecords.push(thisTab);
+
+      // We don't want to wait for each favicon to resolve to get the bytes,
+      // so we estimate a conservative 100 chars for the favicon and JSON
+      // overhead. Rust will further optimize and trim if we were wildly off.
+ runningByteLength +=
+ encoder.encode(thisTab.title + thisTab.lastUsed + url).byteLength + 100;
+
+ // Use the favicon service for the icon url - we can wait for the promises at the end.
+ let iconPromise = lazy.PlacesUtils.promiseFaviconData(url)
+ .then(iconData => {
+ thisTab.icon = iconData.uri.spec;
+ })
+ .catch(ex => {
+          log.trace(`Failed to fetch favicon for ${url}`, ex);
+ });
+ iconPromises.push(iconPromise);
+ }
+
+ await Promise.allSettled(iconPromises);
+ return tabRecords;
+ },
+};
+
+function TabTracker(name, engine) {
+ Tracker.call(this, name, engine);
+
+ // Make sure "this" pointer is always set correctly for event listeners.
+ this.onTab = Utils.bind2(this, this.onTab);
+ this._unregisterListeners = Utils.bind2(this, this._unregisterListeners);
+}
+TabTracker.prototype = {
+ QueryInterface: ChromeUtils.generateQI(["nsIObserver"]),
+
+ clearChangedIDs() {
+ this.modified = false;
+ },
+
+ // We do not track TabSelect because that almost always triggers
+ // the web progress listeners (onLocationChange), which we already track
+ _topics: ["TabOpen", "TabClose"],
+
+ _registerListenersForWindow(window) {
+ this._log.trace("Registering tab listeners in window");
+ for (let topic of this._topics) {
+ window.addEventListener(topic, this.onTab);
+ }
+ window.addEventListener("unload", this._unregisterListeners);
+ // If it's got a tab browser we can listen for things like navigation.
+ if (window.gBrowser) {
+ window.gBrowser.addProgressListener(this);
+ }
+ },
+
+ _unregisterListeners(event) {
+ this._unregisterListenersForWindow(event.target);
+ },
+
+ _unregisterListenersForWindow(window) {
+ this._log.trace("Removing tab listeners in window");
+ window.removeEventListener("unload", this._unregisterListeners);
+ for (let topic of this._topics) {
+ window.removeEventListener(topic, this.onTab);
+ }
+ if (window.gBrowser) {
+ window.gBrowser.removeProgressListener(this);
+ }
+ },
+
+ onStart() {
+ Svc.Obs.add("domwindowopened", this.asyncObserver);
+ for (let win of Services.wm.getEnumerator("navigator:browser")) {
+ this._registerListenersForWindow(win);
+ }
+ },
+
+ onStop() {
+ Svc.Obs.remove("domwindowopened", this.asyncObserver);
+ for (let win of Services.wm.getEnumerator("navigator:browser")) {
+ this._unregisterListenersForWindow(win);
+ }
+ },
+
+ async observe(subject, topic, data) {
+ switch (topic) {
+ case "domwindowopened":
+ let onLoad = () => {
+ subject.removeEventListener("load", onLoad);
+ // Only register after the window is done loading to avoid unloads.
+ this._registerListenersForWindow(subject);
+ };
+
+ // Add tab listeners now that a window has opened.
+ subject.addEventListener("load", onLoad);
+ break;
+ }
+ },
+
+ onTab(event) {
+ if (event.originalTarget.linkedBrowser) {
+ let browser = event.originalTarget.linkedBrowser;
+ if (
+ lazy.PrivateBrowsingUtils.isBrowserPrivate(browser) &&
+ !lazy.PrivateBrowsingUtils.permanentPrivateBrowsing
+ ) {
+ this._log.trace("Ignoring tab event from private browsing.");
+ return;
+ }
+ }
+ this._log.trace("onTab event: " + event.type);
+
+ switch (event.type) {
+ case "TabOpen":
+ /* We do not have a reliable way of checking the URI on the TabOpen
+ * so we will rely on the other methods (onLocationChange, getAllTabsWithEstimatedMax)
+ * to filter these when going through sync
+ */
+ this.callScheduleSync(SCORE_INCREMENT_SMALL);
+ break;
+ case "TabClose":
+        // If the event target has `linkedBrowser`, it can be assumed to be a
+        // <tab> element; otherwise it's assumed to be a <browser> element and
+        // is used as-is.
+ const tab = event.target.linkedBrowser || event.target;
+
+ // TabClose means the tab has already loaded and we can check the URI
+ // and ignore if it's a scheme we don't care about
+ if (lazy.TABS_FILTERED_SCHEMES.has(tab.currentURI.scheme)) {
+ return;
+ }
+ this.callScheduleSync(SCORE_INCREMENT_SMALL);
+ break;
+ }
+ },
+
+ // web progress listeners.
+ onLocationChange(webProgress, request, locationURI, flags) {
+ // We only care about top-level location changes. We do want location changes in the
+ // same document because if a page uses the `pushState()` API, they *appear* as though
+ // they are in the same document even if the URL changes. It also doesn't hurt to accurately
+ // reflect the fragment changing - so we allow LOCATION_CHANGE_SAME_DOCUMENT
+ if (
+ flags & Ci.nsIWebProgressListener.LOCATION_CHANGE_RELOAD ||
+ !webProgress.isTopLevel ||
+ !locationURI
+ ) {
+ return;
+ }
+
+ // We can't filter out tabs that we don't sync here, because we might be
+ // navigating from a tab that we *did* sync to one we do not, and that
+ // tab we *did* sync should no longer be synced.
+ this.callScheduleSync();
+ },
+
+ callScheduleSync(scoreIncrement) {
+ this.modified = true;
+ let { scheduler } = this.engine.service;
+ let delayInMs = lazy.SYNC_AFTER_DELAY_MS;
+
+ // Schedule a sync once we detect a tab change
+ // to ensure the server always has the most up to date tabs
+ if (
+ delayInMs > 0 &&
+ scheduler.numClients > 1 // Only schedule quick syncs for multi client users
+ ) {
+ if (this.tabsQuickWriteTimer) {
+ this._log.debug(
+ "Detected a tab change, but a quick-write is already scheduled"
+ );
+ return;
+ }
+ this._log.debug(
+ "Detected a tab change: scheduling a quick-write in " + delayInMs + "ms"
+ );
+ CommonUtils.namedTimer(
+ () => {
+ this._log.trace("tab quick-sync timer fired.");
+ this.engine
+ .quickWrite()
+ .then(() => {
+ this._log.trace("tab quick-sync done.");
+ })
+ .catch(ex => {
+ this._log.error("tab quick-sync failed.", ex);
+ });
+ },
+ delayInMs,
+ this,
+ "tabsQuickWriteTimer"
+ );
+ } else if (scoreIncrement) {
+ this._log.debug(
+ "Detected a tab change, but conditions aren't met for a quick write - bumping score"
+ );
+ this.score += scoreIncrement;
+ } else {
+ this._log.debug(
+ "Detected a tab change, but conditions aren't met for a quick write or a score bump"
+ );
+ }
+ },
+};
+Object.setPrototypeOf(TabTracker.prototype, Tracker.prototype);
diff --git a/services/sync/modules/keys.sys.mjs b/services/sync/modules/keys.sys.mjs
new file mode 100644
index 0000000000..b6a1dce19a
--- /dev/null
+++ b/services/sync/modules/keys.sys.mjs
@@ -0,0 +1,166 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Weave } from "resource://services-sync/main.sys.mjs";
+
+/**
+ * Represents a pair of keys.
+ *
+ * Each key stored in a key bundle is 256 bits. One key is used for symmetric
+ * encryption. The other is used for HMAC.
+ *
+ * A KeyBundle by itself is just an anonymous pair of keys. Other types
+ * deriving from this one add semantics, such as associated collections or
+ * generating a key bundle via HKDF from another key.
+ */
+function KeyBundle() {
+ this._encrypt = null;
+ this._encryptB64 = null;
+ this._hmac = null;
+ this._hmacB64 = null;
+}
+KeyBundle.prototype = {
+ _encrypt: null,
+ _encryptB64: null,
+ _hmac: null,
+ _hmacB64: null,
+
+ equals: function equals(bundle) {
+ return (
+ bundle &&
+ bundle.hmacKey == this.hmacKey &&
+ bundle.encryptionKey == this.encryptionKey
+ );
+ },
+
+ /*
+ * Accessors for the two keys.
+ */
+ get encryptionKey() {
+ return this._encrypt;
+ },
+
+ set encryptionKey(value) {
+ if (!value || typeof value != "string") {
+ throw new Error("Encryption key can only be set to string values.");
+ }
+
+ if (value.length < 16) {
+ throw new Error("Encryption key must be at least 128 bits long.");
+ }
+
+ this._encrypt = value;
+ this._encryptB64 = btoa(value);
+ },
+
+ get encryptionKeyB64() {
+ return this._encryptB64;
+ },
+
+ get hmacKey() {
+ return this._hmac;
+ },
+
+ set hmacKey(value) {
+ if (!value || typeof value != "string") {
+ throw new Error("HMAC key can only be set to string values.");
+ }
+
+ if (value.length < 16) {
+ throw new Error("HMAC key must be at least 128 bits long.");
+ }
+
+ this._hmac = value;
+ this._hmacB64 = btoa(value);
+ },
+
+ get hmacKeyB64() {
+ return this._hmacB64;
+ },
+
+ /**
+ * Populate this key pair with 2 new, randomly generated keys.
+ */
+ async generateRandom() {
+    // Compute both at the same time.
+ let [generatedHMAC, generatedEncr] = await Promise.all([
+ Weave.Crypto.generateRandomKey(),
+ Weave.Crypto.generateRandomKey(),
+ ]);
+ this.keyPairB64 = [generatedEncr, generatedHMAC];
+ },
+};
+
+/**
+ * Represents a KeyBundle associated with a collection.
+ *
+ * This is just a KeyBundle with a collection attached.
+ */
+export function BulkKeyBundle(collection) {
+ let log = Log.repository.getLogger("Sync.BulkKeyBundle");
+ log.info("BulkKeyBundle being created for " + collection);
+ KeyBundle.call(this);
+
+ this._collection = collection;
+}
+
+BulkKeyBundle.fromHexKey = function (hexKey) {
+ let key = CommonUtils.hexToBytes(hexKey);
+ let bundle = new BulkKeyBundle();
+ // [encryptionKey, hmacKey]
+ bundle.keyPair = [key.slice(0, 32), key.slice(32, 64)];
+ return bundle;
+};
+
+BulkKeyBundle.fromJWK = function (jwk) {
+ if (!jwk || !jwk.k || jwk.kty !== "oct") {
+ throw new Error("Invalid JWK provided to BulkKeyBundle.fromJWK");
+ }
+ return BulkKeyBundle.fromHexKey(CommonUtils.base64urlToHex(jwk.k));
+};
+
+BulkKeyBundle.prototype = {
+ get collection() {
+ return this._collection;
+ },
+
+ /**
+ * Obtain the key pair in this key bundle.
+ *
+ * The returned keys are represented as raw byte strings.
+ */
+ get keyPair() {
+ return [this.encryptionKey, this.hmacKey];
+ },
+
+ set keyPair(value) {
+ if (!Array.isArray(value) || value.length != 2) {
+ throw new Error("BulkKeyBundle.keyPair value must be array of 2 keys.");
+ }
+
+ this.encryptionKey = value[0];
+ this.hmacKey = value[1];
+ },
+
+ get keyPairB64() {
+ return [this.encryptionKeyB64, this.hmacKeyB64];
+ },
+
+ set keyPairB64(value) {
+ if (!Array.isArray(value) || value.length != 2) {
+ throw new Error(
+ "BulkKeyBundle.keyPairB64 value must be an array of 2 keys."
+ );
+ }
+
+ this.encryptionKey = CommonUtils.safeAtoB(value[0]);
+ this.hmacKey = CommonUtils.safeAtoB(value[1]);
+ },
+};
+
+Object.setPrototypeOf(BulkKeyBundle.prototype, KeyBundle.prototype);
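+
+// Usage sketch (hypothetical key material): a 64-byte hex key splits into a
+// 32-byte encryption key followed by a 32-byte HMAC key:
+//   let bundle = BulkKeyBundle.fromHexKey("a1".repeat(64)); // 128 hex chars
+//   let [encKey, hmacKey] = bundle.keyPair; // raw byte strings, 32 bytes each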
diff --git a/services/sync/modules/main.sys.mjs b/services/sync/modules/main.sys.mjs
new file mode 100644
index 0000000000..283d4c7f09
--- /dev/null
+++ b/services/sync/modules/main.sys.mjs
@@ -0,0 +1,25 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+export { lazy as Weave };
+
+const lazy = {};
+
+// We want these to be lazily loaded, which helps performance and also lets
+// tests avoid loading them before they are ready.
+// eslint-disable-next-line mozilla/valid-lazy
+ChromeUtils.defineESModuleGetters(lazy, {
+ Service: "resource://services-sync/service.sys.mjs",
+ Status: "resource://services-sync/status.sys.mjs",
+ Svc: "resource://services-sync/util.sys.mjs",
+ Utils: "resource://services-sync/util.sys.mjs",
+});
+
+// eslint-disable-next-line mozilla/valid-lazy
+ChromeUtils.defineLazyGetter(lazy, "Crypto", () => {
+ let { WeaveCrypto } = ChromeUtils.importESModule(
+ "resource://services-crypto/WeaveCrypto.sys.mjs"
+ );
+ return new WeaveCrypto();
+});
diff --git a/services/sync/modules/policies.sys.mjs b/services/sync/modules/policies.sys.mjs
new file mode 100644
index 0000000000..68a6645ef4
--- /dev/null
+++ b/services/sync/modules/policies.sys.mjs
@@ -0,0 +1,1057 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import {
+ CREDENTIALS_CHANGED,
+ ENGINE_APPLY_FAIL,
+ ENGINE_UNKNOWN_FAIL,
+ IDLE_OBSERVER_BACK_DELAY,
+ LOGIN_FAILED_INVALID_PASSPHRASE,
+ LOGIN_FAILED_LOGIN_REJECTED,
+ LOGIN_FAILED_NETWORK_ERROR,
+ LOGIN_FAILED_NO_PASSPHRASE,
+ LOGIN_SUCCEEDED,
+ MASTER_PASSWORD_LOCKED,
+ MASTER_PASSWORD_LOCKED_RETRY_INTERVAL,
+ MAX_ERROR_COUNT_BEFORE_BACKOFF,
+ MINIMUM_BACKOFF_INTERVAL,
+ MULTI_DEVICE_THRESHOLD,
+ NO_SYNC_NODE_FOUND,
+ NO_SYNC_NODE_INTERVAL,
+ OVER_QUOTA,
+ RESPONSE_OVER_QUOTA,
+ SCORE_UPDATE_DELAY,
+ SERVER_MAINTENANCE,
+ SINGLE_USER_THRESHOLD,
+ STATUS_OK,
+ SYNC_FAILED_PARTIAL,
+ SYNC_SUCCEEDED,
+ kSyncBackoffNotMet,
+ kSyncMasterPasswordLocked,
+} from "resource://services-sync/constants.sys.mjs";
+
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+const { logManager } = ChromeUtils.import(
+ "resource://gre/modules/FxAccountsCommon.js"
+);
+import { Async } from "resource://services-common/async.sys.mjs";
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ AddonManager: "resource://gre/modules/AddonManager.sys.mjs",
+ Status: "resource://services-sync/status.sys.mjs",
+});
+XPCOMUtils.defineLazyGetter(lazy, "fxAccounts", () => {
+ return ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+ ).getFxAccountsSingleton();
+});
+XPCOMUtils.defineLazyServiceGetter(
+ lazy,
+ "IdleService",
+ "@mozilla.org/widget/useridleservice;1",
+ "nsIUserIdleService"
+);
+XPCOMUtils.defineLazyServiceGetter(
+ lazy,
+ "CaptivePortalService",
+ "@mozilla.org/network/captive-portal-service;1",
+ "nsICaptivePortalService"
+);
+
+// Get the value for an interval that's stored in preferences. To save users
+// from themselves (and us from them!) the minimum time they can specify
+// is 60s.
+function getThrottledIntervalPreference(prefName) {
+ return Math.max(Svc.Prefs.get(prefName), 60) * 1000;
+}
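+// e.g. a stored value of 30 is clamped to 60 and returned as 60000 ms, while
+// a value of 600 is returned as 600000 ms.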
+
+export function SyncScheduler(service) {
+ this.service = service;
+ this.init();
+}
+
+SyncScheduler.prototype = {
+ _log: Log.repository.getLogger("Sync.SyncScheduler"),
+
+ _fatalLoginStatus: [
+ LOGIN_FAILED_NO_PASSPHRASE,
+ LOGIN_FAILED_INVALID_PASSPHRASE,
+ LOGIN_FAILED_LOGIN_REJECTED,
+ ],
+
+ /**
+ * The nsITimer object that schedules the next sync. See scheduleNextSync().
+ */
+ syncTimer: null,
+
+ setDefaults: function setDefaults() {
+ this._log.trace("Setting SyncScheduler policy values to defaults.");
+
+ this.singleDeviceInterval = getThrottledIntervalPreference(
+ "scheduler.fxa.singleDeviceInterval"
+ );
+ this.idleInterval = getThrottledIntervalPreference(
+ "scheduler.idleInterval"
+ );
+ this.activeInterval = getThrottledIntervalPreference(
+ "scheduler.activeInterval"
+ );
+ this.immediateInterval = getThrottledIntervalPreference(
+ "scheduler.immediateInterval"
+ );
+
+ // A user is non-idle on startup by default.
+ this.idle = false;
+
+ this.hasIncomingItems = false;
+ // This is the last number of clients we saw when previously updating the
+ // client mode. If this != currentNumClients (obtained from prefs written
+ // by the clients engine) then we need to transition to and from
+ // single and multi-device mode.
+ this.numClientsLastSync = 0;
+
+ this._resyncs = 0;
+
+ this.clearSyncTriggers();
+ },
+
+ // nextSync is in milliseconds, but prefs can't hold that much
+ get nextSync() {
+ return Svc.Prefs.get("nextSync", 0) * 1000;
+ },
+ set nextSync(value) {
+ Svc.Prefs.set("nextSync", Math.floor(value / 1000));
+ },
+
+ get missedFxACommandsFetchInterval() {
+ return Services.prefs.getIntPref(
+ "identity.fxaccounts.commands.missed.fetch_interval"
+ );
+ },
+
+ get missedFxACommandsLastFetch() {
+ return Services.prefs.getIntPref(
+ "identity.fxaccounts.commands.missed.last_fetch",
+ 0
+ );
+ },
+
+ set missedFxACommandsLastFetch(val) {
+ Services.prefs.setIntPref(
+ "identity.fxaccounts.commands.missed.last_fetch",
+ val
+ );
+ },
+
+ get syncInterval() {
+ return this._syncInterval;
+ },
+ set syncInterval(value) {
+ if (value != this._syncInterval) {
+ Services.prefs.setIntPref("services.sync.syncInterval", value);
+ }
+ },
+
+ get syncThreshold() {
+ return this._syncThreshold;
+ },
+ set syncThreshold(value) {
+ if (value != this._syncThreshold) {
+ Services.prefs.setIntPref("services.sync.syncThreshold", value);
+ }
+ },
+
+ get globalScore() {
+ return this._globalScore;
+ },
+ set globalScore(value) {
+ if (this._globalScore != value) {
+ Services.prefs.setIntPref("services.sync.globalScore", value);
+ }
+ },
+
+ // Managed by the clients engine (by way of prefs)
+ get numClients() {
+ return this.numDesktopClients + this.numMobileClients;
+ },
+ set numClients(value) {
+ throw new Error("Don't set numClients - the clients engine manages it.");
+ },
+
+ get offline() {
+ // Services.io.offline has slowly become fairly useless over the years - it
+ // no longer attempts to track the actual network state by default, but one
+ // thing stays true: if it says we're offline then we are definitely not online.
+ //
+ // We also ask the captive portal service if we are behind a locked captive
+ // portal.
+ //
+ // We don't check on the NetworkLinkService however, because it gave us
+ // false positives in the past in a vm environment.
+ try {
+ if (
+ Services.io.offline ||
+ lazy.CaptivePortalService.state ==
+ lazy.CaptivePortalService.LOCKED_PORTAL
+ ) {
+ return true;
+ }
+ } catch (ex) {
+ this._log.warn("Could not determine network status.", ex);
+ }
+ return false;
+ },
+
+ _initPrefGetters() {
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "idleTime",
+ "services.sync.scheduler.idleTime"
+ );
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "maxResyncs",
+ "services.sync.maxResyncs",
+ 0
+ );
+
+ // The number of clients we have is maintained in preferences via the
+    // clients engine, and only updated after a successful sync.
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "numDesktopClients",
+ "services.sync.clients.devices.desktop",
+ 0
+ );
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "numMobileClients",
+ "services.sync.clients.devices.mobile",
+ 0
+ );
+
+ // Scheduler state that seems to be read more often than it's written.
+ // We also check if the value has changed before writing in the setters.
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_syncThreshold",
+ "services.sync.syncThreshold",
+ SINGLE_USER_THRESHOLD
+ );
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_syncInterval",
+ "services.sync.syncInterval",
+ this.singleDeviceInterval
+ );
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_globalScore",
+ "services.sync.globalScore",
+ 0
+ );
+ },
+
+ init: function init() {
+ this._log.manageLevelFromPref("services.sync.log.logger.service.main");
+ this.setDefaults();
+ this._initPrefGetters();
+ Svc.Obs.add("weave:engine:score:updated", this);
+ Svc.Obs.add("network:offline-status-changed", this);
+ Svc.Obs.add("network:link-status-changed", this);
+ Svc.Obs.add("captive-portal-detected", this);
+ Svc.Obs.add("weave:service:sync:start", this);
+ Svc.Obs.add("weave:service:sync:finish", this);
+ Svc.Obs.add("weave:engine:sync:finish", this);
+ Svc.Obs.add("weave:engine:sync:error", this);
+ Svc.Obs.add("weave:service:login:error", this);
+ Svc.Obs.add("weave:service:logout:finish", this);
+ Svc.Obs.add("weave:service:sync:error", this);
+ Svc.Obs.add("weave:service:backoff:interval", this);
+ Svc.Obs.add("weave:engine:sync:applied", this);
+ Svc.Obs.add("weave:service:setup-complete", this);
+ Svc.Obs.add("weave:service:start-over", this);
+ Svc.Obs.add("FxA:hawk:backoff:interval", this);
+
+ if (lazy.Status.checkSetup() == STATUS_OK) {
+ Svc.Obs.add("wake_notification", this);
+ Svc.Obs.add("captive-portal-login-success", this);
+ Svc.Obs.add("sleep_notification", this);
+ lazy.IdleService.addIdleObserver(this, this.idleTime);
+ }
+ },
+
+ // eslint-disable-next-line complexity
+ observe: function observe(subject, topic, data) {
+ this._log.trace("Handling " + topic);
+ switch (topic) {
+ case "weave:engine:score:updated":
+ if (lazy.Status.login == LOGIN_SUCCEEDED) {
+ CommonUtils.namedTimer(
+ this.calculateScore,
+ SCORE_UPDATE_DELAY,
+ this,
+ "_scoreTimer"
+ );
+ }
+ break;
+ case "network:link-status-changed":
+ // Note: NetworkLinkService is unreliable; we get false negatives from it
+ // in cases such as VMs (bug 1420802), so we don't want to use it in
+ // `get offline`, but we assume that it's probably reliable if we're
+ // getting status changed events. (We might be wrong about this, but if
+ // so, the only downside is that we won't sync as promptly.)
+ let isOffline = this.offline;
+ this._log.debug(
+ `Network link status changed to "${data}". Offline?`,
+ isOffline
+ );
+ // Data may be one of `up`, `down`, `change`, or `unknown`. We only want
+ // to sync if it's "up".
+ if (data == "up" && !isOffline) {
+ this._log.debug("Network link looks up. Syncing.");
+ this.scheduleNextSync(0, { why: topic });
+ } else if (data == "down") {
+ // Unschedule pending syncs if we know we're going down. We don't do
+ // this via `checkSyncStatus`, since link status isn't reflected in
+ // `this.offline`.
+ this.clearSyncTriggers();
+ }
+ break;
+ case "network:offline-status-changed":
+ case "captive-portal-detected":
+ // Whether online or offline, we'll reschedule syncs
+ this._log.trace("Network offline status change: " + data);
+ this.checkSyncStatus();
+ break;
+ case "weave:service:sync:start":
+ // Clear out any potentially pending syncs now that we're syncing
+ this.clearSyncTriggers();
+
+ // Reset backoff info. If the server tells us to continue backing off,
+ // we'll handle that later.
+ lazy.Status.resetBackoff();
+
+ this.globalScore = 0;
+ break;
+ case "weave:service:sync:finish":
+ this.nextSync = 0;
+ this.adjustSyncInterval();
+
+ if (
+ lazy.Status.service == SYNC_FAILED_PARTIAL &&
+ this.requiresBackoff
+ ) {
+ this.requiresBackoff = false;
+ this.handleSyncError();
+ return;
+ }
+
+ let sync_interval;
+ let nextSyncReason = "schedule";
+ this.updateGlobalScore();
+ if (
+ this.globalScore > this.syncThreshold &&
+ lazy.Status.service == STATUS_OK
+ ) {
+ // The global score should be 0 after a sync. If it's not, either
+ // items were changed during the last sync (and we should schedule an
+ // immediate follow-up sync), or an engine skipped syncing and still
+ // has pending changes.
+ this._resyncs++;
+ if (this._resyncs <= this.maxResyncs) {
+ sync_interval = 0;
+ nextSyncReason = "resync";
+ } else {
+ this._log.warn(
+ `Resync attempt ${this._resyncs} exceeded ` +
+ `maximum ${this.maxResyncs}`
+ );
+ Svc.Obs.notify("weave:service:resyncs-finished");
+ }
+ } else {
+ this._resyncs = 0;
+ Svc.Obs.notify("weave:service:resyncs-finished");
+ }
+
+ this._syncErrors = 0;
+ if (lazy.Status.sync == NO_SYNC_NODE_FOUND) {
+ // If we don't have a Sync node, override the interval, even if we've
+ // scheduled a follow-up sync.
+ this._log.trace("Scheduling a sync at interval NO_SYNC_NODE_FOUND.");
+ sync_interval = NO_SYNC_NODE_INTERVAL;
+ }
+ this.scheduleNextSync(sync_interval, { why: nextSyncReason });
+ break;
+ case "weave:engine:sync:finish":
+ if (data == "clients") {
+ // Update the client mode because it might change what we sync.
+ this.updateClientMode();
+ }
+ break;
+ case "weave:engine:sync:error":
+ // `subject` is the exception thrown by an engine's sync() method.
+ let exception = subject;
+ if (exception.status >= 500 && exception.status <= 504) {
+ this.requiresBackoff = true;
+ }
+ break;
+ case "weave:service:login:error":
+ this.clearSyncTriggers();
+
+ if (lazy.Status.login == MASTER_PASSWORD_LOCKED) {
+ // Try again later, just as if we threw an error... only without the
+ // error count.
+ this._log.debug("Couldn't log in: master password is locked.");
+ this._log.trace(
+ "Scheduling a sync at MASTER_PASSWORD_LOCKED_RETRY_INTERVAL"
+ );
+ this.scheduleAtInterval(MASTER_PASSWORD_LOCKED_RETRY_INTERVAL);
+ } else if (!this._fatalLoginStatus.includes(lazy.Status.login)) {
+ // Not a fatal login error, just an intermittent network or server
+ // issue. Keep on syncin'.
+ this.checkSyncStatus();
+ }
+ break;
+ case "weave:service:logout:finish":
+ // Start or cancel the sync timer depending on whether we're
+ // logged in or logged out.
+ this.checkSyncStatus();
+ break;
+ case "weave:service:sync:error":
+ // There may be multiple clients but if the sync fails, client mode
+ // should still be updated so that the next sync has a correct interval.
+ this.updateClientMode();
+ this.adjustSyncInterval();
+ this.nextSync = 0;
+ this.handleSyncError();
+ break;
+ case "FxA:hawk:backoff:interval":
+ case "weave:service:backoff:interval":
+ let requested_interval = subject * 1000;
+ this._log.debug(
+ "Got backoff notification: " + requested_interval + "ms"
+ );
+ // Leave up to 25% more time for the backoff.
+ let interval = requested_interval * (1 + Math.random() * 0.25);
+ lazy.Status.backoffInterval = interval;
+ lazy.Status.minimumNextSync = Date.now() + requested_interval;
+ this._log.debug(
+ "Fuzzed minimum next sync: " + lazy.Status.minimumNextSync
+ );
+ break;
+ case "weave:engine:sync:applied":
+ let numItems = subject.succeeded;
+ this._log.trace(
+ "Engine " + data + " successfully applied " + numItems + " items."
+ );
+ // Bug 1800186 - the tabs engine always reports incoming items, so we don't
+ // want special scheduling in this scenario.
+ // (However, even when we fix the underlying cause of that, we probably still can
+ // ignore tabs here - new incoming tabs don't need to trigger the extra syncs we do
+ // based on this flag.)
+ if (data != "tabs" && numItems) {
+ this.hasIncomingItems = true;
+ }
+ if (subject.newFailed) {
+ this._log.error(
+ `Engine ${data} found ${subject.newFailed} new records that failed to apply`
+ );
+ }
+ break;
+ case "weave:service:setup-complete":
+ Services.prefs.savePrefFile(null);
+ lazy.IdleService.addIdleObserver(this, this.idleTime);
+ Svc.Obs.add("wake_notification", this);
+ Svc.Obs.add("captive-portal-login-success", this);
+ Svc.Obs.add("sleep_notification", this);
+ break;
+ case "weave:service:start-over":
+ this.setDefaults();
+ try {
+ lazy.IdleService.removeIdleObserver(this, this.idleTime);
+ } catch (ex) {
+ if (ex.result != Cr.NS_ERROR_FAILURE) {
+ throw ex;
+ }
+ // In all likelihood we didn't have an idle observer registered yet.
+ // It's all good.
+ }
+ break;
+ case "idle":
+ this._log.trace("We're idle.");
+ this.idle = true;
+ // Adjust the interval for future syncs. This won't actually have any
+ // effect until the next pending sync (which will happen soon since we
+ // were just active.)
+ this.adjustSyncInterval();
+ break;
+ case "active":
+ this._log.trace("Received notification that we're back from idle.");
+ this.idle = false;
+ CommonUtils.namedTimer(
+ function onBack() {
+ if (this.idle) {
+ this._log.trace(
+ "... and we're idle again. " +
+ "Ignoring spurious back notification."
+ );
+ return;
+ }
+
+ this._log.trace("Genuine return from idle. Syncing.");
+ // Trigger a sync if we have multiple clients.
+ if (this.numClients > 1) {
+ this.scheduleNextSync(0, { why: topic });
+ }
+ },
+ IDLE_OBSERVER_BACK_DELAY,
+ this,
+ "idleDebouncerTimer"
+ );
+ break;
+ case "wake_notification":
+ this._log.debug("Woke from sleep.");
+ CommonUtils.nextTick(() => {
+ // Trigger a sync if we have multiple clients. We give it 2 seconds
+ // so the browser can recover from the wake and do more important
+ // operations first (timers etc).
+ if (this.numClients > 1) {
+ if (!this.offline) {
+ this._log.debug("Online, will sync in 2s.");
+ this.scheduleNextSync(2000, { why: topic });
+ }
+ }
+ });
+ break;
+ case "captive-portal-login-success":
+ this._log.debug("Captive portal login success. Scheduling a sync.");
+ CommonUtils.nextTick(() => {
+ this.scheduleNextSync(3000, { why: topic });
+ });
+ break;
+ case "sleep_notification":
+ if (this.service.engineManager.get("tabs")._tracker.modified) {
+ this._log.debug("Going to sleep, doing a quick sync.");
+ this.scheduleNextSync(0, { engines: ["tabs"], why: "sleep" });
+ }
+ break;
+ }
+ },
+
+ adjustSyncInterval: function adjustSyncInterval() {
+ if (this.numClients <= 1) {
+ this._log.trace("Adjusting syncInterval to singleDeviceInterval.");
+ this.syncInterval = this.singleDeviceInterval;
+ return;
+ }
+
+ // Only MULTI_DEVICE clients reach this point; SINGLE_USER clients
+ // returned above.
+ if (this.idle) {
+ this._log.trace("Adjusting syncInterval to idleInterval.");
+ this.syncInterval = this.idleInterval;
+ return;
+ }
+
+ if (this.hasIncomingItems) {
+ this._log.trace("Adjusting syncInterval to immediateInterval.");
+ this.hasIncomingItems = false;
+ this.syncInterval = this.immediateInterval;
+ } else {
+ this._log.trace("Adjusting syncInterval to activeInterval.");
+ this.syncInterval = this.activeInterval;
+ }
+ },
+
+ updateGlobalScore() {
+ let engines = [this.service.clientsEngine].concat(
+ this.service.engineManager.getEnabled()
+ );
+ let globalScore = this.globalScore;
+ for (let i = 0; i < engines.length; i++) {
+ this._log.trace(engines[i].name + ": score: " + engines[i].score);
+ globalScore += engines[i].score;
+ engines[i]._tracker.resetScore();
+ }
+ this.globalScore = globalScore;
+ this._log.trace("Global score updated: " + globalScore);
+ },
+
+ calculateScore() {
+ this.updateGlobalScore();
+ this.checkSyncStatus();
+ },
+
+ /**
+ * Query the number of known clients to figure out what mode to be in
+ */
+ updateClientMode: function updateClientMode() {
+ // Nothing to do if it's the same amount
+ let numClients = this.numClients;
+ if (numClients == this.numClientsLastSync) {
+ return;
+ }
+
+ this._log.debug(
+ `Client count: ${this.numClientsLastSync} -> ${numClients}`
+ );
+ this.numClientsLastSync = numClients;
+
+ if (numClients <= 1) {
+ this._log.trace("Adjusting syncThreshold to SINGLE_USER_THRESHOLD");
+ this.syncThreshold = SINGLE_USER_THRESHOLD;
+ } else {
+ this._log.trace("Adjusting syncThreshold to MULTI_DEVICE_THRESHOLD");
+ this.syncThreshold = MULTI_DEVICE_THRESHOLD;
+ }
+ this.adjustSyncInterval();
+ },
+
+ /**
+ * Check if we should be syncing and schedule the next sync, if it's not scheduled
+ */
+ checkSyncStatus: function checkSyncStatus() {
+ // If we shouldn't be syncing now, cancel any sync timers and return.
+ // Backoff and a locked master password are deliberately ignored here,
+ // since in those cases we still want to schedule the next sync.
+ let ignore = [kSyncBackoffNotMet, kSyncMasterPasswordLocked];
+ let skip = this.service._checkSync(ignore);
+ this._log.trace('_checkSync returned "' + skip + '".');
+ if (skip) {
+ this.clearSyncTriggers();
+ return;
+ }
+
+ let why = "schedule";
+ // Only set the wait time to 0 if we need to sync right away
+ let wait;
+ if (this.globalScore > this.syncThreshold) {
+ this._log.debug("Global Score threshold hit, triggering sync.");
+ wait = 0;
+ why = "score";
+ }
+ this.scheduleNextSync(wait, { why });
+ },
+
+ /**
+ * Call sync() if Master Password is not locked.
+ *
+ * Otherwise, reschedule a sync for later.
+ */
+ syncIfMPUnlocked(engines, why) {
+ // No point if we got kicked out by the master password dialog.
+ if (lazy.Status.login == MASTER_PASSWORD_LOCKED && Utils.mpLocked()) {
+ this._log.debug(
+ "Not initiating sync: Login status is " + lazy.Status.login
+ );
+
+ // If we're not syncing now, we need to schedule the next one.
+ this._log.trace(
+ "Scheduling a sync at MASTER_PASSWORD_LOCKED_RETRY_INTERVAL"
+ );
+ this.scheduleAtInterval(MASTER_PASSWORD_LOCKED_RETRY_INTERVAL);
+ return;
+ }
+
+ if (!Async.isAppReady()) {
+ this._log.debug("Not initiating sync: app is shutting down");
+ return;
+ }
+ Services.tm.dispatchToMainThread(() => {
+ this.service.sync({ engines, why });
+ const now = Math.round(new Date().getTime() / 1000);
+ // Only fetch missed messages in a "scheduled" sync so we don't race against
+ // the Push service reconnecting on a network link change for example.
+ if (
+ why == "schedule" &&
+ now >=
+ this.missedFxACommandsLastFetch + this.missedFxACommandsFetchInterval
+ ) {
+ lazy.fxAccounts.commands
+ .pollDeviceCommands()
+ .then(() => {
+ this.missedFxACommandsLastFetch = now;
+ })
+ .catch(e => {
+ this._log.error("Fetching missed remote commands failed.", e);
+ });
+ }
+ });
+ },
+
+ /**
+ * Set a timer for the next sync
+ */
+ scheduleNextSync(interval, { engines = null, why = null } = {}) {
+ // If no interval was specified, use the current sync interval.
+ if (interval == null) {
+ interval = this.syncInterval;
+ }
+
+ // Ensure the interval is set to no less than the backoff.
+ if (lazy.Status.backoffInterval && interval < lazy.Status.backoffInterval) {
+ this._log.trace(
+ "Requested interval " +
+ interval +
+ " ms is smaller than the backoff interval. " +
+ "Using backoff interval " +
+ lazy.Status.backoffInterval +
+ " ms instead."
+ );
+ interval = lazy.Status.backoffInterval;
+ }
+ let nextSync = this.nextSync;
+ if (nextSync != 0) {
+ // There's already a sync scheduled. Don't reschedule if there's already
+ // a timer scheduled for sooner than requested.
+ let currentInterval = nextSync - Date.now();
+ this._log.trace(
+ "There's already a sync scheduled in " + currentInterval + " ms."
+ );
+ if (currentInterval < interval && this.syncTimer) {
+ this._log.trace(
+ "Ignoring scheduling request for next sync in " + interval + " ms."
+ );
+ return;
+ }
+ }
+
+ // Start the sync right away if we're already late.
+ if (interval <= 0) {
+ this._log.trace(`Requested sync should happen right away. (why=${why})`);
+ this.syncIfMPUnlocked(engines, why);
+ return;
+ }
+
+ this._log.debug(`Next sync in ${interval} ms. (why=${why})`);
+ CommonUtils.namedTimer(
+ () => {
+ this.syncIfMPUnlocked(engines, why);
+ },
+ interval,
+ this,
+ "syncTimer"
+ );
+
+ // Save the next sync time in case sync is disabled (logout/offline/etc.)
+ this.nextSync = Date.now() + interval;
+ },
+
+ /**
+ * Incorporates the backoff/retry logic used in error handling and elective
+ * non-syncing.
+ */
+ scheduleAtInterval: function scheduleAtInterval(minimumInterval) {
+ let interval = Utils.calculateBackoff(
+ this._syncErrors,
+ MINIMUM_BACKOFF_INTERVAL,
+ lazy.Status.backoffInterval
+ );
+ if (minimumInterval) {
+ interval = Math.max(minimumInterval, interval);
+ }
+
+ this._log.debug(
+ "Starting client-initiated backoff. Next sync in " + interval + " ms."
+ );
+ this.scheduleNextSync(interval, { why: "client-backoff-schedule" });
+ },
+
+ autoConnect: function autoConnect() {
+ if (this.service._checkSetup() == STATUS_OK && !this.service._checkSync()) {
+ // Schedule a sync based on when a previous sync was scheduled.
+ // scheduleNextSync() will do the right thing if that time lies in
+ // the past.
+ this.scheduleNextSync(this.nextSync - Date.now(), { why: "startup" });
+ }
+ },
+
+ _syncErrors: 0,
+ /**
+ * Deal with sync errors appropriately
+ */
+ handleSyncError: function handleSyncError() {
+ this._log.trace("In handleSyncError. Error count: " + this._syncErrors);
+ this._syncErrors++;
+
+ // Do nothing on the first couple of failures, if we're not in
+ // backoff due to 5xx errors.
+ if (!lazy.Status.enforceBackoff) {
+ if (this._syncErrors < MAX_ERROR_COUNT_BEFORE_BACKOFF) {
+ this.scheduleNextSync(null, { why: "reschedule" });
+ return;
+ }
+ this._log.debug(
+ "Sync error count has exceeded " +
+ MAX_ERROR_COUNT_BEFORE_BACKOFF +
+ "; enforcing backoff."
+ );
+ lazy.Status.enforceBackoff = true;
+ }
+
+ this.scheduleAtInterval();
+ },
+
+ /**
+ * Remove any timers/observers that might trigger a sync
+ */
+ clearSyncTriggers: function clearSyncTriggers() {
+ this._log.debug("Clearing sync triggers and the global score.");
+ this.globalScore = this.nextSync = 0;
+
+ // Clear out any scheduled syncs
+ if (this.syncTimer) {
+ this.syncTimer.clear();
+ }
+ },
+};
+
+export function ErrorHandler(service) {
+ this.service = service;
+ this.init();
+}
+
+ErrorHandler.prototype = {
+ init() {
+ Svc.Obs.add("weave:engine:sync:applied", this);
+ Svc.Obs.add("weave:engine:sync:error", this);
+ Svc.Obs.add("weave:service:login:error", this);
+ Svc.Obs.add("weave:service:sync:error", this);
+ Svc.Obs.add("weave:service:sync:finish", this);
+ Svc.Obs.add("weave:service:start-over:finish", this);
+
+ this.initLogs();
+ },
+
+ initLogs: function initLogs() {
+ // Set the root Sync logger level based on a pref. All other logs will
+ // inherit this level unless they specifically override it.
+ Log.repository
+ .getLogger("Sync")
+ .manageLevelFromPref(`services.sync.log.logger`);
+ // And allow our specific log to have a custom level via a pref.
+ this._log = Log.repository.getLogger("Sync.ErrorHandler");
+ this._log.manageLevelFromPref("services.sync.log.logger.service.main");
+ },
+
+ observe(subject, topic, data) {
+ this._log.trace("Handling " + topic);
+ switch (topic) {
+ case "weave:engine:sync:applied":
+ if (subject.newFailed) {
+ // An engine isn't able to apply one or more incoming records.
+ // We don't fail hard on this, but it usually indicates a bug,
+ // so for now treat it as a sync error (cf. Service._syncEngine())
+ lazy.Status.engines = [data, ENGINE_APPLY_FAIL];
+ this._log.debug(data + " failed to apply some records.");
+ }
+ break;
+ case "weave:engine:sync:error": {
+ let exception = subject; // exception thrown by engine's sync() method
+ let engine_name = data; // engine name that threw the exception
+
+ this.checkServerError(exception);
+
+ lazy.Status.engines = [
+ engine_name,
+ exception.failureCode || ENGINE_UNKNOWN_FAIL,
+ ];
+ if (Async.isShutdownException(exception)) {
+ this._log.debug(
+ engine_name +
+ " was interrupted due to the application shutting down"
+ );
+ } else {
+ this._log.debug(engine_name + " failed", exception);
+ }
+ break;
+ }
+ case "weave:service:login:error":
+ this._log.error("Sync encountered a login error");
+ this.resetFileLog();
+ break;
+ case "weave:service:sync:error": {
+ if (lazy.Status.sync == CREDENTIALS_CHANGED) {
+ this.service.logout();
+ }
+
+ let exception = subject;
+ if (Async.isShutdownException(exception)) {
+ // If we are shutting down we just log the fact, attempt to flush
+ // the log file and get out of here!
+ this._log.error(
+ "Sync was interrupted due to the application shutting down"
+ );
+ this.resetFileLog();
+ break;
+ }
+
+ // Not a shutdown related exception...
+ this._log.error("Sync encountered an error", exception);
+ this.resetFileLog();
+ break;
+ }
+ case "weave:service:sync:finish":
+ this._log.trace("Status.service is " + lazy.Status.service);
+
+ // Check both of these status codes: in the event of a failure in one
+ // engine, Status.service will be SYNC_FAILED_PARTIAL despite
+ // Status.sync being SYNC_SUCCEEDED.
+ // *facepalm*
+ if (
+ lazy.Status.sync == SYNC_SUCCEEDED &&
+ lazy.Status.service == STATUS_OK
+ ) {
+ // Great. Let's clear our mid-sync 401 note.
+ this._log.trace("Clearing lastSyncReassigned.");
+ Svc.Prefs.reset("lastSyncReassigned");
+ }
+
+ if (lazy.Status.service == SYNC_FAILED_PARTIAL) {
+ this._log.error("Some engines did not sync correctly.");
+ }
+ this.resetFileLog();
+ break;
+ case "weave:service:start-over:finish":
+ // ensure we capture any logs between the last sync and the reset completing.
+ this.resetFileLog()
+ .then(() => {
+ // For privacy reasons we then delete all logs, although a preference
+ // allows keeping them to help with debugging.
+ if (!Svc.Prefs.get("log.keepLogsOnReset", false)) {
+ return logManager.removeAllLogs().then(() => {
+ Svc.Obs.notify("weave:service:remove-file-log");
+ });
+ }
+ return null;
+ })
+ .catch(err => {
+ // So we failed to delete the logs - take the ironic option of
+ // writing this error to the logs we failed to delete!
+ this._log.error("Failed to delete logs on reset", err);
+ });
+ break;
+ }
+ },
+
+ async _dumpAddons() {
+ // Just dump the items that sync may be concerned with. Specifically,
+ // active extensions that are not hidden.
+ let addons = [];
+ try {
+ addons = await lazy.AddonManager.getAddonsByTypes(["extension"]);
+ } catch (e) {
+ this._log.warn("Failed to dump addons", e);
+ }
+
+ let relevantAddons = addons.filter(x => x.isActive && !x.hidden);
+ this._log.trace("Addons installed", relevantAddons.length);
+ for (let addon of relevantAddons) {
+ this._log.trace(" - ${name}, version ${version}, id ${id}", addon);
+ }
+ },
+
+ /**
+ * Generate a log file for the sync that just completed
+ * and refresh the input & output streams.
+ */
+ async resetFileLog() {
+ // If we're writing an error log, dump extensions that may be causing problems.
+ if (logManager.sawError) {
+ await this._dumpAddons();
+ }
+ const logType = await logManager.resetFileLog();
+ if (logType == logManager.ERROR_LOG_WRITTEN) {
+ console.error(
+ "Sync encountered an error - see about:sync-log for the log file."
+ );
+ }
+ Svc.Obs.notify("weave:service:reset-file-log");
+ },
+
+ /**
+ * Handle HTTP response results or exceptions and set the appropriate
+ * Status.* bits.
+ *
+ * This method also looks for "side-channel" warnings.
+ */
+ checkServerError(resp) {
+ // In this case we were passed a resolved value of Resource#_doRequest.
+ switch (resp.status) {
+ case 400:
+ if (resp == RESPONSE_OVER_QUOTA) {
+ lazy.Status.sync = OVER_QUOTA;
+ }
+ break;
+
+ case 401:
+ this.service.logout();
+ this._log.info("Got 401 response; resetting clusterURL.");
+ this.service.clusterURL = null;
+
+ let delay = 0;
+ if (Svc.Prefs.get("lastSyncReassigned")) {
+ // We got a 401 in the middle of the previous sync, and we just got
+ // another. Login must have succeeded in order for us to get here, so
+ // the password should be correct.
+ // This is likely to be an intermittent server issue, so back off and
+ // give it time to recover.
+ this._log.warn("Last sync also failed for 401. Delaying next sync.");
+ delay = MINIMUM_BACKOFF_INTERVAL;
+ } else {
+ this._log.debug("New mid-sync 401 failure. Making a note.");
+ Svc.Prefs.set("lastSyncReassigned", true);
+ }
+ this._log.info("Attempting to schedule another sync.");
+ this.service.scheduler.scheduleNextSync(delay, { why: "reschedule" });
+ break;
+
+ case 500:
+ case 502:
+ case 503:
+ case 504:
+ lazy.Status.enforceBackoff = true;
+ if (resp.status == 503 && resp.headers["retry-after"]) {
+ let retryAfter = resp.headers["retry-after"];
+ this._log.debug("Got Retry-After: " + retryAfter);
+ if (this.service.isLoggedIn) {
+ lazy.Status.sync = SERVER_MAINTENANCE;
+ } else {
+ lazy.Status.login = SERVER_MAINTENANCE;
+ }
+ Svc.Obs.notify(
+ "weave:service:backoff:interval",
+ parseInt(retryAfter, 10)
+ );
+ }
+ break;
+ }
+
+ // In this other case we were passed a rejection value.
+ switch (resp.result) {
+ case Cr.NS_ERROR_UNKNOWN_HOST:
+ case Cr.NS_ERROR_CONNECTION_REFUSED:
+ case Cr.NS_ERROR_NET_TIMEOUT:
+ case Cr.NS_ERROR_NET_RESET:
+ case Cr.NS_ERROR_NET_INTERRUPT:
+ case Cr.NS_ERROR_PROXY_CONNECTION_REFUSED:
+ // The constant says it's about login, but in fact it just
+ // indicates a general network error.
+ if (this.service.isLoggedIn) {
+ lazy.Status.sync = LOGIN_FAILED_NETWORK_ERROR;
+ } else {
+ lazy.Status.login = LOGIN_FAILED_NETWORK_ERROR;
+ }
+ break;
+ }
+ },
+};
diff --git a/services/sync/modules/record.sys.mjs b/services/sync/modules/record.sys.mjs
new file mode 100644
index 0000000000..7d5918a8ca
--- /dev/null
+++ b/services/sync/modules/record.sys.mjs
@@ -0,0 +1,1335 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const CRYPTO_COLLECTION = "crypto";
+const KEYS_WBO = "keys";
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import {
+ DEFAULT_DOWNLOAD_BATCH_SIZE,
+ DEFAULT_KEYBUNDLE_NAME,
+} from "resource://services-sync/constants.sys.mjs";
+import { BulkKeyBundle } from "resource://services-sync/keys.sys.mjs";
+import { Weave } from "resource://services-sync/main.sys.mjs";
+import { Resource } from "resource://services-sync/resource.sys.mjs";
+import { Utils } from "resource://services-sync/util.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+import { CryptoUtils } from "resource://services-crypto/utils.sys.mjs";
+
+/**
+ * The base class for all Sync basic storage objects (BSOs). This is the format
+ * used to store all records on the Sync server. In an earlier version of the
+ * Sync protocol, BSOs used to be called WBOs, or Weave Basic Objects. This
+ * class retains the old name.
+ *
+ * @class
+ * @param {String} collection The collection name for this BSO.
+ * @param {String} id The ID of this BSO.
+ */
+export function WBORecord(collection, id) {
+ this.data = {};
+ this.payload = {};
+ this.collection = collection; // Optional.
+ this.id = id; // Optional.
+}
+
+WBORecord.prototype = {
+ _logName: "Sync.Record.WBO",
+
+ get sortindex() {
+ if (this.data.sortindex) {
+ return this.data.sortindex;
+ }
+ return 0;
+ },
+
+ // Get thyself from your URI, then deserialize.
+ // Set thine 'response' field.
+ async fetch(resource) {
+ if (!(resource instanceof Resource)) {
+ throw new Error("First argument must be a Resource instance.");
+ }
+
+ let r = await resource.get();
+ if (r.success) {
+ this.deserialize(r.obj); // Warning! Muffles exceptions!
+ }
+ this.response = r;
+ return this;
+ },
+
+ upload(resource) {
+ if (!(resource instanceof Resource)) {
+ throw new Error("First argument must be a Resource instance.");
+ }
+
+ return resource.put(this);
+ },
+
+ // Take a base URI string, with trailing slash, and return the URI of this
+ // WBO based on collection and ID.
+ uri(base) {
+ if (this.collection && this.id) {
+ let url = CommonUtils.makeURI(base + this.collection + "/" + this.id);
+ url.QueryInterface(Ci.nsIURL);
+ return url;
+ }
+ return null;
+ },
+
+ deserialize: function deserialize(json) {
+ if (!json || typeof json !== "object") {
+ throw new TypeError("Can't deserialize record from: " + json);
+ }
+ this.data = json;
+ try {
+ // The payload is likely to be JSON, but if not, keep it as a string
+ this.payload = JSON.parse(this.payload);
+ } catch (ex) {}
+ },
+
+ toJSON: function toJSON() {
+ // Copy fields from data to be stringified, making sure payload is a string
+ let obj = {};
+ for (let [key, val] of Object.entries(this.data)) {
+ obj[key] = key == "payload" ? JSON.stringify(val) : val;
+ }
+ if (this.ttl) {
+ obj.ttl = this.ttl;
+ }
+ return obj;
+ },
+
+ toString: function toString() {
+ return (
+ "{ " +
+ "id: " +
+ this.id +
+ " " +
+ "index: " +
+ this.sortindex +
+ " " +
+ "modified: " +
+ this.modified +
+ " " +
+ "ttl: " +
+ this.ttl +
+ " " +
+ "payload: " +
+ JSON.stringify(this.payload) +
+ " }"
+ );
+ },
+};
+
+Utils.deferGetSet(WBORecord, "data", [
+ "id",
+ "modified",
+ "sortindex",
+ "payload",
+]);
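+
+// A minimal sketch of round-tripping a WBO (collection, id, and payload are
+// hypothetical). Note that toJSON() stringifies the payload, while
+// deserialize() parses it back into an object:
+//
+//   let wbo = new WBORecord("bookmarks", "abc123");
+//   wbo.payload = { title: "example" };
+//   let body = JSON.stringify(wbo.toJSON()); // payload is a nested JSON string
+//   let copy = new WBORecord("bookmarks", "abc123");
+//   copy.deserialize(JSON.parse(body));      // copy.payload is an object again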
+
+/**
+ * An encrypted BSO record. This subclass handles encrypting and decrypting the
+ * BSO payload, but doesn't parse or interpret the cleartext string. Subclasses
+ * must override `transformBeforeEncrypt` and `transformAfterDecrypt` to process
+ * the cleartext.
+ *
+ * This class is only exposed for bridged engines, which handle serialization
+ * and deserialization in Rust. Sync engines implemented in JS should subclass
+ * `CryptoWrapper` instead, which takes care of transforming the cleartext into
+ * an object, and ensuring its contents are valid.
+ *
+ * @class
+ * @template Cleartext
+ * @param {String} collection The collection name for this BSO.
+ * @param {String} id The ID of this BSO.
+ */
+export function RawCryptoWrapper(collection, id) {
+ // Setting properties before calling the superclass constructor isn't allowed
+ // in new-style classes (`class MyRecord extends RawCryptoWrapper`), but
+ // allowed with plain functions. This is also why `defaultCleartext` is a
+ // method, and not simply set in the subclass constructor.
+ this.cleartext = this.defaultCleartext();
+ WBORecord.call(this, collection, id);
+ this.ciphertext = null;
+}
+
+RawCryptoWrapper.prototype = {
+ _logName: "Sync.Record.RawCryptoWrapper",
+
+ /**
+ * Returns the default empty cleartext for this record type. This is exposed
+ * as a method so that subclasses can override it, and access the default
+ * cleartext in their constructors. `CryptoWrapper`, for example, overrides
+ * this to return an empty object, so that initializing the `id` in its
+ * constructor calls its overridden `id` setter.
+ *
+ * @returns {Cleartext} An empty cleartext.
+ */
+ defaultCleartext() {
+ return null;
+ },
+
+ /**
+ * Transforms the cleartext into a string that can be encrypted and wrapped
+ * in a BSO payload. This is called before uploading the record to the server.
+ *
+ * @param {Cleartext} outgoingCleartext The cleartext to upload.
+ * @returns {String} The serialized cleartext.
+ */
+ transformBeforeEncrypt(outgoingCleartext) {
+ throw new TypeError("Override to stringify outgoing records");
+ },
+
+ /**
+ * Transforms an incoming cleartext string into an instance of the
+ * `Cleartext` type. This is called when fetching the record from the
+ * server.
+ *
+ * @param {String} incomingCleartext The decrypted cleartext string.
+ * @returns {Cleartext} The parsed cleartext.
+ */
+ transformAfterDecrypt(incomingCleartext) {
+ throw new TypeError("Override to parse incoming records");
+ },
+
+ ciphertextHMAC: async function ciphertextHMAC(keyBundle) {
+ let hmacKeyByteString = keyBundle.hmacKey;
+ if (!hmacKeyByteString) {
+ throw new Error("Cannot compute HMAC without an HMAC key.");
+ }
+ let hmacKey = CommonUtils.byteStringToArrayBuffer(hmacKeyByteString);
+ // NB: this.ciphertext is a base64-encoded string. For some reason this
+ // implementation computes the HMAC on the encoded value.
+ let data = CommonUtils.byteStringToArrayBuffer(this.ciphertext);
+ let hmac = await CryptoUtils.hmac("SHA-256", hmacKey, data);
+ return CommonUtils.bytesAsHex(CommonUtils.arrayBufferToByteString(hmac));
+ },
+
+ /*
+ * Don't directly use the sync key. Instead, grab a key for this
+ * collection, which is decrypted with the sync key.
+ *
+ * Cache those keys; invalidate the cache if the time on the keys collection
+ * changes, or other auth events occur.
+ *
+ * Optional key bundle overrides the collection key lookup.
+ */
+ async encrypt(keyBundle) {
+ if (!keyBundle) {
+ throw new Error("A key bundle must be supplied to encrypt.");
+ }
+
+ this.IV = Weave.Crypto.generateRandomIV();
+ this.ciphertext = await Weave.Crypto.encrypt(
+ this.transformBeforeEncrypt(this.cleartext),
+ keyBundle.encryptionKeyB64,
+ this.IV
+ );
+ this.hmac = await this.ciphertextHMAC(keyBundle);
+ this.cleartext = null;
+ },
+
+ // Optional key bundle.
+ async decrypt(keyBundle) {
+ if (!this.ciphertext) {
+ throw new Error("No ciphertext: nothing to decrypt?");
+ }
+
+ if (!keyBundle) {
+ throw new Error("A key bundle must be supplied to decrypt.");
+ }
+
+ // Authenticate the encrypted blob with the expected HMAC
+ let computedHMAC = await this.ciphertextHMAC(keyBundle);
+
+ if (computedHMAC != this.hmac) {
+ Utils.throwHMACMismatch(this.hmac, computedHMAC);
+ }
+
+ let cleartext = await Weave.Crypto.decrypt(
+ this.ciphertext,
+ keyBundle.encryptionKeyB64,
+ this.IV
+ );
+ this.cleartext = this.transformAfterDecrypt(cleartext);
+ this.ciphertext = null;
+
+ return this.cleartext;
+ },
+};
+
+Object.setPrototypeOf(RawCryptoWrapper.prototype, WBORecord.prototype);
+
+Utils.deferGetSet(RawCryptoWrapper, "payload", ["ciphertext", "IV", "hmac"]);
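+
+// A minimal subclass sketch (hypothetical; real bridged engines do this work
+// on the Rust side): treat the cleartext as a plain string by overriding the
+// two transform hooks.
+//
+//   function StringCryptoWrapper(collection, id) {
+//     RawCryptoWrapper.call(this, collection, id);
+//   }
+//   StringCryptoWrapper.prototype = {
+//     defaultCleartext() { return ""; },
+//     transformBeforeEncrypt(cleartext) { return cleartext; },
+//     transformAfterDecrypt(cleartext) { return cleartext; },
+//   };
+//   Object.setPrototypeOf(
+//     StringCryptoWrapper.prototype,
+//     RawCryptoWrapper.prototype
+//   );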
+
+/**
+ * An encrypted BSO record with a JSON payload. All engines implemented in JS
+ * should subclass this class to describe their own record types.
+ *
+ * @class
+ * @param {String} collection The collection name for this BSO.
+ * @param {String} id The ID of this BSO.
+ */
+export function CryptoWrapper(collection, id) {
+ RawCryptoWrapper.call(this, collection, id);
+}
+
+CryptoWrapper.prototype = {
+ _logName: "Sync.Record.CryptoWrapper",
+
+ defaultCleartext() {
+ return {};
+ },
+
+ transformBeforeEncrypt(cleartext) {
+ return JSON.stringify(cleartext);
+ },
+
+ transformAfterDecrypt(cleartext) {
+ // Handle invalid data here. Elsewhere we assume that cleartext is an object.
+ let json_result = JSON.parse(cleartext);
+
+ if (!(json_result && json_result instanceof Object)) {
+ throw new Error(
+ `Decryption failed: result is <${json_result}>, not an object.`
+ );
+ }
+
+ // If the payload has an encrypted id ensure it matches the requested record's id.
+ if (json_result.id && json_result.id != this.id) {
+ throw new Error(`Record id mismatch: ${json_result.id} != ${this.id}`);
+ }
+
+ return json_result;
+ },
+
+ cleartextToString() {
+ return JSON.stringify(this.cleartext);
+ },
+
+ toString: function toString() {
+ let payload = this.deleted ? "DELETED" : this.cleartextToString();
+
+ return (
+ "{ " +
+ "id: " +
+ this.id +
+ " " +
+ "index: " +
+ this.sortindex +
+ " " +
+ "modified: " +
+ this.modified +
+ " " +
+ "ttl: " +
+ this.ttl +
+ " " +
+ "payload: " +
+ payload +
+ " " +
+ "collection: " +
+ (this.collection || "undefined") +
+ " }"
+ );
+ },
+
+ // The custom setter below masks the parent's getter, so explicitly call it :(
+ get id() {
+ return super.id;
+ },
+
+ // Keep both plaintext and encrypted versions of the id to verify integrity
+ set id(val) {
+ super.id = val;
+ this.cleartext.id = val;
+ },
+};
+
+Object.setPrototypeOf(CryptoWrapper.prototype, RawCryptoWrapper.prototype);
+
+Utils.deferGetSet(CryptoWrapper, "cleartext", "deleted");
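+
+// A sketch of the encrypt/decrypt round trip (in real use the key bundle
+// comes from CollectionKeyManager.keyForCollection):
+//
+//   let wbo = new CryptoWrapper("bookmarks", "abc123");
+//   wbo.cleartext.title = "example";
+//   await wbo.encrypt(keyBundle);  // sets ciphertext/IV/hmac, nulls cleartext
+//   let clear = await wbo.decrypt(keyBundle); // verifies the HMAC first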
+
+/**
+ * An interface and caching layer for records.
+ */
+export function RecordManager(service) {
+ this.service = service;
+
+ this._log = Log.repository.getLogger(this._logName);
+ this._records = {};
+}
+
+RecordManager.prototype = {
+ _recordType: CryptoWrapper,
+ _logName: "Sync.RecordManager",
+
+ async import(url) {
+ this._log.trace("Importing record: " + (url.spec ? url.spec : url));
+ try {
+ // Clear out the last response with empty object if GET fails
+ this.response = {};
+ this.response = await this.service.resource(url).get();
+
+ // Don't parse and save the record on failure
+ if (!this.response.success) {
+ return null;
+ }
+
+ let record = new this._recordType(url);
+ record.deserialize(this.response.obj);
+
+ return this.set(url, record);
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.debug("Failed to import record", ex);
+ return null;
+ }
+ },
+
+ get(url) {
+ // Use a url string as the key to the hash
+ let spec = url.spec ? url.spec : url;
+ if (spec in this._records) {
+ return Promise.resolve(this._records[spec]);
+ }
+ return this.import(url);
+ },
+
+ set: function RecordMgr_set(url, record) {
+ let spec = url.spec ? url.spec : url;
+ return (this._records[spec] = record);
+ },
+
+ contains: function RecordMgr_contains(url) {
+ if ((url.spec || url) in this._records) {
+ return true;
+ }
+ return false;
+ },
+
+ clearCache: function recordMgr_clearCache() {
+ this._records = {};
+ },
+
+ del: function RecordMgr_del(url) {
+ delete this._records[url];
+ },
+};
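+
+// Illustrative usage (the URL is hypothetical): records are cached by URL
+// spec, so repeated gets for the same URL avoid a second network request.
+//
+//   let records = new RecordManager(service);
+//   let keys = await records.get(service.storageURL + "crypto/keys");
+//   records.del(service.storageURL + "crypto/keys"); // drop from the cache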
+
+/**
+ * Keeps track of mappings between collection names ('tabs') and KeyBundles.
+ *
+ * You can update this thing simply by giving it /info/collections. It'll
+ * use the last modified time to bring itself up to date.
+ */
+export function CollectionKeyManager(lastModified, default_, collections) {
+ this.lastModified = lastModified || 0;
+ this._default = default_ || null;
+ this._collections = collections || {};
+
+ this._log = Log.repository.getLogger("Sync.CollectionKeyManager");
+}
+
+// TODO: persist this locally as an Identity. Bug 610913.
+// Note that the last modified time needs to be preserved.
+CollectionKeyManager.prototype = {
+ /**
+ * Generate a new CollectionKeyManager that has the same attributes
+ * as this one.
+ */
+ clone() {
+ const newCollections = {};
+ for (let c in this._collections) {
+ newCollections[c] = this._collections[c];
+ }
+
+ return new CollectionKeyManager(
+ this.lastModified,
+ this._default,
+ newCollections
+ );
+ },
+
+ // Return information about old vs new keys:
+ // * same: true if two collections are equal
+ // * changed: an array of collection names that changed.
+ _compareKeyBundleCollections: function _compareKeyBundleCollections(m1, m2) {
+ let changed = [];
+
+ function process(m1, m2) {
+ for (let k1 in m1) {
+ let v1 = m1[k1];
+ let v2 = m2[k1];
+ if (!(v1 && v2 && v1.equals(v2))) {
+ changed.push(k1);
+ }
+ }
+ }
+
+ // Diffs both ways.
+ process(m1, m2);
+ process(m2, m1);
+
+ // Return a sorted, unique array.
+ changed.sort();
+ let last;
+ changed = changed.filter(x => x != last && (last = x));
+ return { same: !changed.length, changed };
+ },
+
+ get isClear() {
+ return !this._default;
+ },
+
+ clear: function clear() {
+ this._log.info("Clearing collection keys...");
+ this.lastModified = 0;
+ this._collections = {};
+ this._default = null;
+ },
+
+ keyForCollection(collection) {
+ if (collection && this._collections[collection]) {
+ return this._collections[collection];
+ }
+
+ return this._default;
+ },
+
+ /**
+ * Create a `crypto/keys` WBO from a map of collection names to key
+ * bundles (`collections`) and an optional default bundle. This only
+ * serializes keys that already exist; it does not generate any.
+ */
+ _makeWBO(collections, defaultBundle) {
+ let wbo = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
+ let c = {};
+ for (let k in collections) {
+ c[k] = collections[k].keyPairB64;
+ }
+ wbo.cleartext = {
+ default: defaultBundle ? defaultBundle.keyPairB64 : null,
+ collections: c,
+ collection: CRYPTO_COLLECTION,
+ id: KEYS_WBO,
+ };
+ return wbo;
+ },
+
+ /**
+ * Create a WBO for the current keys.
+ */
+ asWBO(collection, id) {
+ return this._makeWBO(this._collections, this._default);
+ },
+
+ /**
+ * Compute a new default key, and new keys for any specified collections.
+ */
+ async newKeys(collections) {
+ let newDefaultKeyBundle = await this.newDefaultKeyBundle();
+
+ let newColls = {};
+ if (collections) {
+ for (let c of collections) {
+ let b = new BulkKeyBundle(c);
+ await b.generateRandom();
+ newColls[c] = b;
+ }
+ }
+ return [newDefaultKeyBundle, newColls];
+ },
+
+ /**
+ * Generates new keys, but does not replace our local copy. Use this to
+ * verify an upload before storing.
+ */
+ async generateNewKeysWBO(collections) {
+ let newDefaultKey, newColls;
+ [newDefaultKey, newColls] = await this.newKeys(collections);
+
+ return this._makeWBO(newColls, newDefaultKey);
+ },
+
+ /**
+ * Create a new default key.
+ *
+ * @returns {BulkKeyBundle}
+ */
+ async newDefaultKeyBundle() {
+ const key = new BulkKeyBundle(DEFAULT_KEYBUNDLE_NAME);
+ await key.generateRandom();
+ return key;
+ },
+
+ /**
+ * Create a new default key and store it as this._default, since without
+ * one you cannot use setContents.
+ */
+ async generateDefaultKey() {
+ this._default = await this.newDefaultKeyBundle();
+ },
+
+ /**
+ * Return true if keys are already present for each of the given
+ * collections.
+ */
+ hasKeysFor(collections) {
+ // We can't use filter() here because sometimes collections is an iterator.
+ for (let collection of collections) {
+ if (!this._collections[collection]) {
+ return false;
+ }
+ }
+ return true;
+ },
+
+ /**
+ * Return a new CollectionKeyManager that has keys for each of the
+ * given collections (creating new ones for collections where we
+ * don't already have keys).
+ */
+ async ensureKeysFor(collections) {
+ const newKeys = Object.assign({}, this._collections);
+ for (let c of collections) {
+ if (newKeys[c]) {
+ continue; // don't replace existing keys
+ }
+
+ const b = new BulkKeyBundle(c);
+ await b.generateRandom();
+ newKeys[c] = b;
+ }
+ return new CollectionKeyManager(this.lastModified, this._default, newKeys);
+ },
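+
+ // Note that ensureKeysFor never mutates this instance; callers are
+ // expected to swap in the returned manager, e.g. (hypothetical):
+ //   manager = await manager.ensureKeysFor(["bookmarks", "history"]);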
+
+ // Take the fetched info/collections WBO, checking the change
+ // time of the crypto collection.
+ updateNeeded(info_collections) {
+ this._log.info(
+ "Testing for updateNeeded. Last modified: " + this.lastModified
+ );
+
+ // No local record of modification time? Need an update.
+ if (!this.lastModified) {
+ return true;
+ }
+
+ // No keys on the server? We need an update, though our
+ // update handling will be a little more drastic...
+ if (!(CRYPTO_COLLECTION in info_collections)) {
+ return true;
+ }
+
+ // Otherwise, we need an update if our modification time is stale.
+ return info_collections[CRYPTO_COLLECTION] > this.lastModified;
+ },
+
+ //
+ // Set our keys and modified time to the values fetched from the server.
+ // Returns one of three values:
+ //
+ // * If the default key was modified, return true.
+ // * If the default key was not modified, but per-collection keys were,
+ // return an array of such.
+ // * Otherwise, return false -- we were up-to-date.
+ //
+ setContents: function setContents(payload, modified) {
+ let self = this;
+
+ this._log.info(
+ "Setting collection keys contents. Our last modified: " +
+ this.lastModified +
+ ", input modified: " +
+ modified +
+ "."
+ );
+
+ if (!payload) {
+ throw new Error("No payload in CollectionKeyManager.setContents().");
+ }
+
+ if (!payload.default) {
+ this._log.warn("No downloaded default key: this should not occur.");
+ this._log.warn("Not clearing local keys.");
+ throw new Error(
+ "No default key in CollectionKeyManager.setContents(). Cannot proceed."
+ );
+ }
+
+ // Process the incoming default key.
+ let b = new BulkKeyBundle(DEFAULT_KEYBUNDLE_NAME);
+ b.keyPairB64 = payload.default;
+ let newDefault = b;
+
+ // Process the incoming collections.
+ let newCollections = {};
+ if ("collections" in payload) {
+ this._log.info("Processing downloaded per-collection keys.");
+ let colls = payload.collections;
+ for (let k in colls) {
+ let v = colls[k];
+ if (v) {
+ let keyObj = new BulkKeyBundle(k);
+ keyObj.keyPairB64 = v;
+ newCollections[k] = keyObj;
+ }
+ }
+ }
+
+ // Check to see if these are already our keys.
+ let sameDefault = this._default && this._default.equals(newDefault);
+ let collComparison = this._compareKeyBundleCollections(
+ newCollections,
+ this._collections
+ );
+ let sameColls = collComparison.same;
+
+ if (sameDefault && sameColls) {
+ self._log.info("New keys are the same as our old keys!");
+ if (modified) {
+ self._log.info("Bumped local modified time.");
+ self.lastModified = modified;
+ }
+ return false;
+ }
+
+ // Make sure things are nice and tidy before we set.
+ this.clear();
+
+ this._log.info("Saving downloaded keys.");
+ this._default = newDefault;
+ this._collections = newCollections;
+
+ // Always trust the server.
+ if (modified) {
+ self._log.info("Bumping last modified to " + modified);
+ self.lastModified = modified;
+ }
+
+ return sameDefault ? collComparison.changed : true;
+ },
+
+ async updateContents(syncKeyBundle, storage_keys) {
+ let log = this._log;
+ log.info("Updating collection keys...");
+
+ // storage_keys is a WBO, fetched from storage/crypto/keys.
+ // Its payload is the default key, and a map of collections to keys.
+ // We lazily compute the key objects from the strings we're given.
+
+ let payload;
+ try {
+ payload = await storage_keys.decrypt(syncKeyBundle);
+ } catch (ex) {
+ log.warn("Got exception decrypting storage keys with sync key.", ex);
+ log.info("Aborting updateContents. Rethrowing.");
+ throw ex;
+ }
+
+ let r = this.setContents(payload, storage_keys.modified);
+ log.info("Collection keys updated.");
+ return r;
+ },
+};
+
+export function Collection(uri, recordObj, service) {
+ if (!service) {
+ throw new Error("Collection constructor requires a service.");
+ }
+
+ Resource.call(this, uri);
+
+ // This is a bit hacky, but gets the job done.
+ let res = service.resource(uri);
+ this.authenticator = res.authenticator;
+
+ this._recordObj = recordObj;
+ this._service = service;
+
+ this._full = false;
+ this._ids = null;
+ this._limit = 0;
+ this._older = 0;
+ this._newer = 0;
+ this._data = [];
+ // optional members used by batch upload operations.
+ this._batch = null;
+ this._commit = false;
+ // Used for batch download operations -- note that this is explicitly an
+ // opaque value and not (necessarily) a number.
+ this._offset = null;
+}
+
+Collection.prototype = {
+ _logName: "Sync.Collection",
+
+ _rebuildURL: function Coll__rebuildURL() {
+ // XXX should consider what happens if it's not a URL...
+ this.uri.QueryInterface(Ci.nsIURL);
+
+ let args = [];
+ if (this.older) {
+ args.push("older=" + this.older);
+ }
+ if (this.newer) {
+ args.push("newer=" + this.newer);
+ }
+ if (this.full) {
+ args.push("full=1");
+ }
+ if (this.sort) {
+ args.push("sort=" + this.sort);
+ }
+ if (this.ids != null) {
+ args.push("ids=" + this.ids);
+ }
+ if (this.limit > 0 && this.limit != Infinity) {
+ args.push("limit=" + this.limit);
+ }
+ if (this._batch) {
+ args.push("batch=" + encodeURIComponent(this._batch));
+ }
+ if (this._commit) {
+ args.push("commit=true");
+ }
+ if (this._offset) {
+ args.push("offset=" + encodeURIComponent(this._offset));
+ }
+
+ this.uri = this.uri
+ .mutate()
+ .setQuery(args.length ? "?" + args.join("&") : "")
+ .finalize();
+ },
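+
+ // e.g. (illustrative): with full = true, sort = "index", and limit = 1000,
+ // the query rebuilt above is "?full=1&sort=index&limit=1000".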
+
+ // get full items
+ get full() {
+ return this._full;
+ },
+ set full(value) {
+ this._full = value;
+ this._rebuildURL();
+ },
+
+ // Apply the action to a certain set of ids
+ get ids() {
+ return this._ids;
+ },
+ set ids(value) {
+ this._ids = value;
+ this._rebuildURL();
+ },
+
+ // Limit how many records to get
+ get limit() {
+ return this._limit;
+ },
+ set limit(value) {
+ this._limit = value;
+ this._rebuildURL();
+ },
+
+ // get only items modified before some date
+ get older() {
+ return this._older;
+ },
+ set older(value) {
+ this._older = value;
+ this._rebuildURL();
+ },
+
+ // get only items modified since some date
+ get newer() {
+ return this._newer;
+ },
+ set newer(value) {
+ this._newer = value;
+ this._rebuildURL();
+ },
+
+ // get items sorted by some criteria. valid values:
+ // oldest (oldest first)
+ // newest (newest first)
+ // index
+ get sort() {
+ return this._sort;
+ },
+ set sort(value) {
+ if (value && value != "oldest" && value != "newest" && value != "index") {
+ throw new TypeError(
+ `Illegal value for sort: "${value}" (should be "oldest", "newest", or "index").`
+ );
+ }
+ this._sort = value;
+ this._rebuildURL();
+ },
+
+ get offset() {
+ return this._offset;
+ },
+ set offset(value) {
+ this._offset = value;
+ this._rebuildURL();
+ },
+
+ // Set information about the batch for this request.
+ get batch() {
+ return this._batch;
+ },
+ set batch(value) {
+ this._batch = value;
+ this._rebuildURL();
+ },
+
+ get commit() {
+ return this._commit;
+ },
+ set commit(value) {
+ this._commit = !!value;
+ this._rebuildURL();
+ },
+
+ // Similar to get(), but will page through the items `batchSize` at a time,
+ // deferring calling the record handler until we've gotten them all.
+ //
+ // Returns the last response processed, and doesn't run the record handler
+ // on any items if a non-success status is received while downloading the
+ // records (or if a network error occurs).
+ async getBatched(batchSize = DEFAULT_DOWNLOAD_BATCH_SIZE) {
+ let totalLimit = Number(this.limit) || Infinity;
+ if (batchSize <= 0 || batchSize >= totalLimit) {
+ throw new Error("Invalid batch size");
+ }
+
+ if (!this.full) {
+ throw new Error("getBatched is unimplemented for guid-only GETs");
+ }
+
+ // _onComplete and _onProgress are reset after each `get` by Resource.
+ let { _onComplete, _onProgress } = this;
+ let recordBuffer = [];
+ let resp;
+ try {
+ let lastModifiedTime;
+ this.limit = batchSize;
+
+ do {
+ this._onProgress = _onProgress;
+ this._onComplete = _onComplete;
+ if (batchSize + recordBuffer.length > totalLimit) {
+ this.limit = totalLimit - recordBuffer.length;
+ }
+ this._log.trace("Performing batched GET", {
+ limit: this.limit,
+ offset: this.offset,
+ });
+ // Actually perform the request
+ resp = await this.get();
+ if (!resp.success) {
+ recordBuffer = [];
+ break;
+ }
+ for (let json of resp.obj) {
+ let record = new this._recordObj();
+ record.deserialize(json);
+ recordBuffer.push(record);
+ }
+
+ // Initialize last modified, or check that something broken isn't happening.
+ let lastModified = resp.headers["x-last-modified"];
+ if (!lastModifiedTime) {
+ lastModifiedTime = lastModified;
+ this.setHeader("X-If-Unmodified-Since", lastModified);
+ } else if (lastModified != lastModifiedTime) {
+ // Should be impossible -- We'd get a 412 in this case.
+ throw new Error(
+ "X-Last-Modified changed in the middle of a download batch! " +
+ `${lastModifiedTime} => ${lastModified}`
+ );
+ }
+
+ // If this is missing, we're finished.
+ this.offset = resp.headers["x-weave-next-offset"];
+ } while (this.offset && totalLimit > recordBuffer.length);
+ } finally {
+ // Ensure we undo any temporary state so that subsequent calls to get()
+ // or getBatched() work properly. We do this before calling the record
+ // handler so that we can more convincingly pretend to be a normal get()
+ // call. Note: we're resetting these to the values they had before this
+ // function was called.
+ this._limit = totalLimit;
+ this._offset = null;
+ delete this._headers["x-if-unmodified-since"];
+ this._rebuildURL();
+ }
+ return { response: resp, records: recordBuffer };
+ },
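+
+ // Illustrative usage (batch size hypothetical):
+ //
+ //   coll.full = true;
+ //   let { response, records } = await coll.getBatched(100);
+ //   if (response.success) {
+ //     // `records` holds every record deserialized across all pages.
+ //   }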
+
+ // This object only supports posting via the postQueue object.
+ post() {
+ throw new Error(
+ "Don't directly post to a collection - use newPostQueue instead"
+ );
+ },
+
+ newPostQueue(log, timestamp, postCallback) {
+ let poster = (data, headers, batch, commit) => {
+ this.batch = batch;
+ this.commit = commit;
+ for (let [header, value] of headers) {
+ this.setHeader(header, value);
+ }
+ return Resource.prototype.post.call(this, data);
+ };
+ return new PostQueue(
+ poster,
+ timestamp,
+ this._service.serverConfiguration || {},
+ log,
+ postCallback
+ );
+ },
+};
+
+Object.setPrototypeOf(Collection.prototype, Resource.prototype);
+
+// These are limits for requests provided by the server at the
+// info/configuration endpoint -- server documentation is available here:
+// http://moz-services-docs.readthedocs.io/en/latest/storage/apis-1.5.html#api-instructions
+//
+// All are optional; however, we synthesize (non-infinite) default values for
+// the "max_request_bytes" and "max_record_payload_bytes" options. For the
+// others, we treat the limit as infinite if they're missing.
+//
+// These are also the only ones that all servers (even batching-disabled
+// servers) should support, at least once this server-syncstorage patch is
+// deployed everywhere: https://github.com/mozilla-services/server-syncstorage/pull/74
+//
+// Batching-enabled servers also limit the amount of payload data and the
+// number of records we can send in a single post, as well as in the whole
+// batch. Note that the byte limits for these apply only to the *payload*
+// data, i.e. the data appearing in the payload property (a string) of the
+// object.
+//
+// Note that in practice, these limits should be sensible, but the code makes
+// no assumptions about this. If we hit any of the limits, we perform the
+// corresponding action (e.g. submit a request, possibly committing the
+// current batch).
+const DefaultPostQueueConfig = Object.freeze({
+ // Number of total bytes allowed in a request
+ max_request_bytes: 260 * 1024,
+
+ // Maximum number of bytes allowed in the "payload" property of a record.
+ max_record_payload_bytes: 256 * 1024,
+
+ // The limit for how many bytes worth of data appearing in "payload"
+ // properties are allowed in a single post.
+ max_post_bytes: Infinity,
+
+ // The limit for the number of records allowed in a single post.
+ max_post_records: Infinity,
+
+ // The limit for how many bytes worth of data appearing in "payload"
+ // properties are allowed in a batch. (Same as max_post_bytes, but for
+ // batches).
+ max_total_bytes: Infinity,
+
+ // The limit for the number of records allowed in a whole batch. (Same
+ // as max_post_records, but for batches).
+ max_total_records: Infinity,
+});
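+
+// For reference, a batching-enabled server might advertise limits along these
+// lines via info/configuration (values hypothetical, not from a real server):
+//
+//   {
+//     "max_request_bytes": 2101248,
+//     "max_record_payload_bytes": 2097152,
+//     "max_post_bytes": 2097152,
+//     "max_post_records": 100,
+//     "max_total_bytes": 104857600,
+//     "max_total_records": 10000
+//   }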
+
+// Manages a pair of (byte, count) limits for a PostQueue, such as
+// (max_post_bytes, max_post_records) or (max_total_bytes, max_total_records).
+class LimitTracker {
+ constructor(maxBytes, maxRecords) {
+ this.maxBytes = maxBytes;
+ this.maxRecords = maxRecords;
+ this.curBytes = 0;
+ this.curRecords = 0;
+ }
+
+ clear() {
+ this.curBytes = 0;
+ this.curRecords = 0;
+ }
+
+ canAddRecord(payloadSize) {
+ // The record counts are inclusive, but depending on the version of the
+ // server, the byte counts may or may not be inclusive (See
+ // https://github.com/mozilla-services/server-syncstorage/issues/73).
+ return (
+ this.curRecords + 1 <= this.maxRecords &&
+ this.curBytes + payloadSize < this.maxBytes
+ );
+ }
+
+ canNeverAdd(recordSize) {
+ return recordSize >= this.maxBytes;
+ }
+
+ didAddRecord(recordSize) {
+ if (!this.canAddRecord(recordSize)) {
+ // This is a bug, caller is expected to call canAddRecord first.
+ throw new Error(
+ "LimitTracker.canAddRecord must be checked before adding record"
+ );
+ }
+ this.curRecords += 1;
+ this.curBytes += recordSize;
+ }
+}
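+
+// A quick sketch of how callers are expected to consult a tracker (sizes and
+// limits hypothetical):
+//
+//   let postLimits = new LimitTracker(1024 * 1024, 100);
+//   if (postLimits.canNeverAdd(payloadSize)) {
+//     // too large for any post; reject the record outright
+//   } else if (!postLimits.canAddRecord(payloadSize)) {
+//     // flush the current post first, then re-check and add
+//   } else {
+//     postLimits.didAddRecord(payloadSize);
+//   }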
+
+/* A helper to manage the posting of records while respecting the various
+ size limits.
+
+ This supports the concept of a server-side "batch". The general idea is:
+ * We queue as many records as allowed in memory, then make a single POST.
+ * This first POST (optionally) gives us a batch ID, which we use for
+ all subsequent posts, until...
+ * At some point we hit a batch-maximum, and jump through a few hoops to
+ commit the current batch (ie, all previous POSTs) and start a new one.
+ * Eventually commit the final batch.
+
+ In most cases we expect there to be exactly 1 batch consisting of possibly
+ multiple POSTs.
+*/
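+
+// A minimal usage sketch (the callback body is hypothetical):
+//
+//   let queue = collection.newPostQueue(log, lastServerTimestamp,
+//     async (response, batchOngoing) => { /* record what was committed */ });
+//   for (let record of outgoing) {
+//     let { enqueued, error } = await queue.enqueue(record);
+//     if (!enqueued) { log.warn("Skipping oversized record", error); }
+//   }
+//   await queue.flush(true); // commit the final batch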
+export function PostQueue(poster, timestamp, serverConfig, log, postCallback) {
+ // The "post" function we should use when it comes time to do the post.
+ this.poster = poster;
+ this.log = log;
+
+ let config = Object.assign({}, DefaultPostQueueConfig, serverConfig);
+
+ if (!serverConfig.max_request_bytes && serverConfig.max_post_bytes) {
+ // Use max_post_bytes for max_request_bytes if it's missing. Only needed
+ // until server-syncstorage/pull/74 is everywhere, and even then it's
+ // unnecessary if the server limits are configured sanely (there's no
+ // guarantee of that, at least until the patch is fully deployed).
+ config.max_request_bytes = serverConfig.max_post_bytes;
+ }
+
+ this.log.trace("new PostQueue config (after defaults): ", config);
+
+ // The callback we make with the response when we do get around to making the
+ // post (which could be during any of the enqueue() calls or the final flush())
+ // This callback may be called multiple times and must not add new items to
+ // the queue.
+ // The second argument passed to this callback is a boolean value that is true
+ // if we're in the middle of a batch, and false if either the batch is
+ // complete, or it's a post to a server that does not understand batching.
+ this.postCallback = postCallback;
+
+ // Tracks the count and combined payload size for the records we've queued
+ // so far but are yet to POST.
+ this.postLimits = new LimitTracker(
+ config.max_post_bytes,
+ config.max_post_records
+ );
+
+ // As above, but for the batch size.
+ this.batchLimits = new LimitTracker(
+ config.max_total_bytes,
+ config.max_total_records
+ );
+
+ // Limit for the size of `this.queued` before we do a post.
+ this.maxRequestBytes = config.max_request_bytes;
+
+ // Limit for the size of incoming record payloads.
+ this.maxPayloadBytes = config.max_record_payload_bytes;
+
+ // The string where we are capturing the stringified version of the records
+ // queued so far. It will always be invalid JSON as it is always missing the
+ // closing bracket. It's also used to track whether or not we've gone past
+ // maxRequestBytes.
+ this.queued = "";
+
+ // The ID of our current batch. Can be undefined (meaning we are yet to make
+  // the first post of a batch, so don't know if we have a batch), null (meaning
+ // we've made the first post but the server response indicated no batching
+ // semantics), otherwise we have made the first post and it holds the batch ID
+ // returned from the server.
+ this.batchID = undefined;
+
+ // Time used for X-If-Unmodified-Since -- should be the timestamp from the last GET.
+ this.lastModified = timestamp;
+}
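+
+// A minimal sketch of driving a PostQueue; `poster`, `records`, `log` and
+// `serverConfig` here are hypothetical stand-ins for the real Sync plumbing:
+//
+//   let queue = new PostQueue(poster, lastGetTimestamp, serverConfig, log,
+//     async (queue, response, inBatch) => {
+//       if (!response.success) {
+//         throw response; // abort the whole upload on a server error.
+//       }
+//     }
+//   );
+//   for (let record of records) {
+//     let { enqueued, error } = await queue.enqueue(record);
+//     if (!enqueued) {
+//       log.warn("Record can never fit; skipping", error);
+//     }
+//   }
+//   await queue.flush(true); // commit the final batch.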
+
+PostQueue.prototype = {
+ async enqueue(record) {
+ // We want to ensure the record has a .toJSON() method defined - even
+ // though JSON.stringify() would implicitly call it, the stringify might
+ // still work even if it isn't defined, which isn't what we want.
+ let jsonRepr = record.toJSON();
+ if (!jsonRepr) {
+ throw new Error(
+ "You must only call this with objects that explicitly support JSON"
+ );
+ }
+
+ let bytes = JSON.stringify(jsonRepr);
+
+ // We use the payload size for the LimitTrackers, since that's what the
+ // byte limits other than max_request_bytes refer to.
+ let payloadLength = jsonRepr.payload.length;
+
+ // The `+ 2` is to account for the 2-byte (maximum) overhead (one byte for
+ // the leading comma or "[", which all records will have, and the other for
+ // the final trailing "]", only present for the last record).
+ let encodedLength = bytes.length + 2;
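+    // For example (illustrative): after two enqueues `this.queued` holds
+    // '[<rec1>,<rec2>' and flush() appends the single trailing ']', so
+    // budgeting bytes.length + 2 per record can only over-estimate.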
+
+ // Check first if there's some limit that indicates we cannot ever enqueue
+ // this record.
+ let isTooBig =
+ this.postLimits.canNeverAdd(payloadLength) ||
+ this.batchLimits.canNeverAdd(payloadLength) ||
+ encodedLength >= this.maxRequestBytes ||
+ payloadLength >= this.maxPayloadBytes;
+
+ if (isTooBig) {
+ return {
+ enqueued: false,
+ error: new Error("Single record too large to submit to server"),
+ };
+ }
+
+ let canPostRecord = this.postLimits.canAddRecord(payloadLength);
+ let canBatchRecord = this.batchLimits.canAddRecord(payloadLength);
+ let canSendRecord =
+ this.queued.length + encodedLength < this.maxRequestBytes;
+
+ if (!canPostRecord || !canBatchRecord || !canSendRecord) {
+ this.log.trace("PostQueue flushing: ", {
+ canPostRecord,
+ canSendRecord,
+ canBatchRecord,
+ });
+ // We need to write the queue out before handling this one, but we only
+ // commit the batch (and thus start a new one) if the record couldn't fit
+ // inside the batch.
+ await this.flush(!canBatchRecord);
+ }
+
+ this.postLimits.didAddRecord(payloadLength);
+ this.batchLimits.didAddRecord(payloadLength);
+
+ // Either a ',' or a '[' depending on whether this is the first record.
+ this.queued += this.queued.length ? "," : "[";
+ this.queued += bytes;
+ return { enqueued: true };
+ },
+
+ async flush(finalBatchPost) {
+ if (!this.queued) {
+      // nothing queued - we can't be in a batch, and something has gone very
+      // wrong if we think we are.
+ if (this.batchID) {
+ throw new Error(
+ `Flush called when no queued records but we are in a batch ${this.batchID}`
+ );
+ }
+ return;
+ }
+ // the batch query-param and headers we'll send.
+ let batch;
+ let headers = [];
+ if (this.batchID === undefined) {
+ // First commit in a (possible) batch.
+ batch = "true";
+ } else if (this.batchID) {
+ // We have an existing batch.
+ batch = this.batchID;
+ } else {
+ // Not the first post and we know we have no batch semantics.
+ batch = null;
+ }
+
+ headers.push(["x-if-unmodified-since", this.lastModified]);
+
+ let numQueued = this.postLimits.curRecords;
+ this.log.info(
+ `Posting ${numQueued} records of ${
+ this.queued.length + 1
+ } bytes with batch=${batch}`
+ );
+ let queued = this.queued + "]";
+ if (finalBatchPost) {
+ this.batchLimits.clear();
+ }
+ this.postLimits.clear();
+ this.queued = "";
+ let response = await this.poster(
+ queued,
+ headers,
+ batch,
+ !!(finalBatchPost && this.batchID !== null)
+ );
+
+ if (!response.success) {
+ this.log.trace("Server error response during a batch", response);
+ // not clear what we should do here - we expect the consumer of this to
+ // abort by throwing in the postCallback below.
+ await this.postCallback(this, response, !finalBatchPost);
+ return;
+ }
+
+ if (finalBatchPost) {
+ this.log.trace("Committed batch", this.batchID);
+ this.batchID = undefined; // we are now in "first post for the batch" state.
+ this.lastModified = response.headers["x-last-modified"];
+ await this.postCallback(this, response, false);
+ return;
+ }
+
+ if (response.status != 202) {
+ if (this.batchID) {
+ throw new Error(
+ "Server responded non-202 success code while a batch was in progress"
+ );
+ }
+ this.batchID = null; // no batch semantics are in place.
+ this.lastModified = response.headers["x-last-modified"];
+ await this.postCallback(this, response, false);
+ return;
+ }
+
+ // this response is saying the server has batch semantics - we should
+ // always have a batch ID in the response.
+ let responseBatchID = response.obj.batch;
+ this.log.trace("Server responsed 202 with batch", responseBatchID);
+ if (!responseBatchID) {
+ this.log.error(
+ "Invalid server response: 202 without a batch ID",
+ response
+ );
+ throw new Error("Invalid server response: 202 without a batch ID");
+ }
+
+ if (this.batchID === undefined) {
+ this.batchID = responseBatchID;
+ if (!this.lastModified) {
+ this.lastModified = response.headers["x-last-modified"];
+ if (!this.lastModified) {
+ throw new Error("Batch response without x-last-modified");
+ }
+ }
+ }
+
+ if (this.batchID != responseBatchID) {
+ throw new Error(
+ `Invalid client/server batch state - client has ${this.batchID}, server has ${responseBatchID}`
+ );
+ }
+
+ await this.postCallback(this, response, true);
+ },
+};
diff --git a/services/sync/modules/resource.sys.mjs b/services/sync/modules/resource.sys.mjs
new file mode 100644
index 0000000000..be7815d534
--- /dev/null
+++ b/services/sync/modules/resource.sys.mjs
@@ -0,0 +1,292 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Observers } from "resource://services-common/observers.sys.mjs";
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+import { Utils } from "resource://services-sync/util.sys.mjs";
+import { setTimeout, clearTimeout } from "resource://gre/modules/Timer.sys.mjs";
+
+/* global AbortController */
+
+/*
+ * Resource represents a remote network resource, identified by a URI.
+ * Create an instance like so:
+ *
+ * let resource = new Resource("http://foobar.com/path/to/resource");
+ *
+ * The 'resource' object has the following methods to issue HTTP requests
+ * of the corresponding HTTP methods; each returns a promise that resolves
+ * with the processed response:
+ *
+ * get()
+ * put(data)
+ * post(data)
+ * delete()
+ */
+export function Resource(uri) {
+ this._log = Log.repository.getLogger(this._logName);
+ this._log.manageLevelFromPref("services.sync.log.logger.network.resources");
+ this.uri = uri;
+ this._headers = {};
+}
+
+// (static) Caches the latest server timestamp (X-Weave-Timestamp header).
+Resource.serverTime = null;
+
+XPCOMUtils.defineLazyPreferenceGetter(
+ Resource,
+ "SEND_VERSION_INFO",
+ "services.sync.sendVersionInfo",
+ true
+);
+Resource.prototype = {
+ _logName: "Sync.Resource",
+
+ /**
+ * Callback to be invoked at request time to add authentication details.
+ * If the callback returns a promise, it will be awaited upon.
+ *
+ * By default, a global authenticator is provided. If this is set, it will
+ * be used instead of the global one.
+ */
+ authenticator: null,
+
+ // Wait 5 minutes before killing a request.
+ ABORT_TIMEOUT: 300000,
+
+ // Headers to be included when making a request for the resource.
+  // Note: header names should be all lower case; there's no explicit
+  // check for duplicates that differ only in case!
+ get headers() {
+ return this._headers;
+ },
+ set headers(_) {
+ throw new Error("headers can't be mutated directly. Please use setHeader.");
+ },
+ setHeader(header, value) {
+ this._headers[header.toLowerCase()] = value;
+ },
+
+ // URI representing this resource.
+ get uri() {
+ return this._uri;
+ },
+ set uri(value) {
+ if (typeof value == "string") {
+ this._uri = CommonUtils.makeURI(value);
+ } else {
+ this._uri = value;
+ }
+ },
+
+ // Get the string representation of the URI.
+ get spec() {
+ if (this._uri) {
+ return this._uri.spec;
+ }
+ return null;
+ },
+
+ /**
+ * @param {string} method HTTP method
+ * @returns {Headers}
+ */
+ async _buildHeaders(method) {
+ const headers = new Headers(this._headers);
+
+ if (Resource.SEND_VERSION_INFO) {
+ headers.append("user-agent", Utils.userAgent);
+ }
+
+ if (this.authenticator) {
+ const result = await this.authenticator(this, method);
+ if (result && result.headers) {
+ for (const [k, v] of Object.entries(result.headers)) {
+ headers.append(k.toLowerCase(), v);
+ }
+ }
+ } else {
+ this._log.debug("No authenticator found.");
+ }
+
+ // PUT and POST are treated differently because they have payload data.
+ if (("PUT" == method || "POST" == method) && !headers.has("content-type")) {
+ headers.append("content-type", "text/plain");
+ }
+
+ if (this._log.level <= Log.Level.Trace) {
+ for (const [k, v] of headers) {
+ if (k == "authorization" || k == "x-client-state") {
+ this._log.trace(`HTTP Header ${k}: ***** (suppressed)`);
+ } else {
+ this._log.trace(`HTTP Header ${k}: ${v}`);
+ }
+ }
+ }
+
+ if (!headers.has("accept")) {
+ headers.append("accept", "application/json;q=0.9,*/*;q=0.2");
+ }
+
+ return headers;
+ },
+
+ /**
+ * @param {string} method HTTP method
+ * @param {string} data HTTP body
+ * @param {object} signal AbortSignal instance
+ * @returns {Request}
+ */
+ async _createRequest(method, data, signal) {
+ const headers = await this._buildHeaders(method);
+ const init = {
+ cache: "no-store", // No cache.
+ headers,
+ method,
+ signal,
+ mozErrors: true, // Return nsresult error codes instead of a generic
+ // NetworkError when fetch rejects.
+ };
+
+ if (data) {
+ if (!(typeof data == "string" || data instanceof String)) {
+ data = JSON.stringify(data);
+ }
+ this._log.debug(`${method} Length: ${data.length}`);
+ this._log.trace(`${method} Body: ${data}`);
+ init.body = data;
+ }
+ return new Request(this.uri.spec, init);
+ },
+
+ /**
+ * @param {string} method HTTP method
+ * @param {string} [data] HTTP body
+ * @returns {Response}
+ */
+ async _doRequest(method, data = null) {
+ const controller = new AbortController();
+ const request = await this._createRequest(method, data, controller.signal);
+ const responsePromise = fetch(request); // Rejects on network failure.
+ let didTimeout = false;
+ const timeoutId = setTimeout(() => {
+ didTimeout = true;
+ this._log.error(
+ `Request timed out after ${this.ABORT_TIMEOUT}ms. Aborting.`
+ );
+ controller.abort();
+ }, this.ABORT_TIMEOUT);
+ let response;
+ try {
+ response = await responsePromise;
+ } catch (e) {
+ this._log.warn(`${method} request to ${this.uri.spec} failed`, e);
+ if (!didTimeout) {
+ throw e;
+ }
+ throw Components.Exception(
+ "Request aborted (timeout)",
+ Cr.NS_ERROR_NET_TIMEOUT
+ );
+ } finally {
+ clearTimeout(timeoutId);
+ }
+ return this._processResponse(response, method);
+ },
+
+ async _processResponse(response, method) {
+ const data = await response.text();
+ this._logResponse(response, method, data);
+ this._processResponseHeaders(response);
+
+ const ret = {
+ data,
+ url: response.url,
+ status: response.status,
+ success: response.ok,
+ headers: {},
+ };
+ for (const [k, v] of response.headers) {
+ ret.headers[k] = v;
+ }
+
+ // Make a lazy getter to convert the json response into an object.
+ // Note that this can cause a parse error to be thrown far away from the
+ // actual fetch, so be warned!
+ XPCOMUtils.defineLazyGetter(ret, "obj", () => {
+ try {
+ return JSON.parse(ret.data);
+ } catch (ex) {
+ this._log.warn("Got exception parsing response body", ex);
+ // Stringify to avoid possibly printing non-printable characters.
+ this._log.debug(
+ "Parse fail: Response body starts",
+ (ret.data + "").slice(0, 100)
+ );
+ throw ex;
+ }
+ });
+
+ return ret;
+ },
+
+ _logResponse(response, method, data) {
+ const { status, ok: success, url } = response;
+
+ // Log the status of the request.
+ this._log.debug(
+ `${method} ${success ? "success" : "fail"} ${status} ${url}`
+ );
+
+ // Additionally give the full response body when Trace logging.
+ if (this._log.level <= Log.Level.Trace) {
+ this._log.trace(`${method} body`, data);
+ }
+
+ if (!success) {
+ this._log.warn(
+ `${method} request to ${url} failed with status ${status}`
+ );
+ }
+ },
+
+ _processResponseHeaders({ headers, ok: success }) {
+ if (headers.has("x-weave-timestamp")) {
+ Resource.serverTime = parseFloat(headers.get("x-weave-timestamp"));
+ }
+ // This is a server-side safety valve to allow slowing down
+ // clients without hurting performance.
+ if (headers.has("x-weave-backoff")) {
+ let backoff = headers.get("x-weave-backoff");
+ this._log.debug(`Got X-Weave-Backoff: ${backoff}`);
+ Observers.notify("weave:service:backoff:interval", parseInt(backoff, 10));
+ }
+
+ if (success && headers.has("x-weave-quota-remaining")) {
+ Observers.notify(
+ "weave:service:quota:remaining",
+ parseInt(headers.get("x-weave-quota-remaining"), 10)
+ );
+ }
+ },
+
+ get() {
+ return this._doRequest("GET");
+ },
+
+ put(data) {
+ return this._doRequest("PUT", data);
+ },
+
+ post(data) {
+ return this._doRequest("POST", data);
+ },
+
+ delete() {
+ return this._doRequest("DELETE");
+ },
+};
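+
+// A minimal usage sketch; the URL is a placeholder, and real callers obtain
+// an authenticator from the identity manager rather than building their own:
+//
+//   let res = new Resource("https://example.com/1.5/12345/info/collections");
+//   res.authenticator = myAuthenticator; // hypothetical
+//   let response = await res.get();
+//   if (response.success) {
+//     let collections = response.obj; // lazily JSON-parsed; may throw here.
+//   }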
diff --git a/services/sync/modules/service.sys.mjs b/services/sync/modules/service.sys.mjs
new file mode 100644
index 0000000000..7204c18434
--- /dev/null
+++ b/services/sync/modules/service.sys.mjs
@@ -0,0 +1,1630 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const CRYPTO_COLLECTION = "crypto";
+const KEYS_WBO = "keys";
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+import { AppConstants } from "resource://gre/modules/AppConstants.sys.mjs";
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+
+import {
+ CLIENT_NOT_CONFIGURED,
+ CREDENTIALS_CHANGED,
+ HMAC_EVENT_INTERVAL,
+ LOGIN_FAILED,
+ LOGIN_FAILED_INVALID_PASSPHRASE,
+ LOGIN_FAILED_NETWORK_ERROR,
+ LOGIN_FAILED_NO_PASSPHRASE,
+ LOGIN_FAILED_NO_USERNAME,
+ LOGIN_FAILED_SERVER_ERROR,
+ LOGIN_SUCCEEDED,
+ MASTER_PASSWORD_LOCKED,
+ METARECORD_DOWNLOAD_FAIL,
+ NO_SYNC_NODE_FOUND,
+ PREFS_BRANCH,
+ STATUS_DISABLED,
+ STATUS_OK,
+ STORAGE_VERSION,
+ VERSION_OUT_OF_DATE,
+ WEAVE_VERSION,
+ kFirefoxShuttingDown,
+ kFirstSyncChoiceNotMade,
+ kSyncBackoffNotMet,
+ kSyncMasterPasswordLocked,
+ kSyncNetworkOffline,
+ kSyncNotConfigured,
+ kSyncWeaveDisabled,
+} from "resource://services-sync/constants.sys.mjs";
+
+import { EngineManager } from "resource://services-sync/engines.sys.mjs";
+import { ClientEngine } from "resource://services-sync/engines/clients.sys.mjs";
+import { Weave } from "resource://services-sync/main.sys.mjs";
+import {
+ ErrorHandler,
+ SyncScheduler,
+} from "resource://services-sync/policies.sys.mjs";
+import {
+ CollectionKeyManager,
+ CryptoWrapper,
+ RecordManager,
+ WBORecord,
+} from "resource://services-sync/record.sys.mjs";
+import { Resource } from "resource://services-sync/resource.sys.mjs";
+import { EngineSynchronizer } from "resource://services-sync/stages/enginesync.sys.mjs";
+import { DeclinedEngines } from "resource://services-sync/stages/declined.sys.mjs";
+import { Status } from "resource://services-sync/status.sys.mjs";
+
+ChromeUtils.importESModule("resource://services-sync/telemetry.sys.mjs");
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import { getFxAccountsSingleton } from "resource://gre/modules/FxAccounts.sys.mjs";
+
+const fxAccounts = getFxAccountsSingleton();
+
+function getEngineModules() {
+ let result = {
+ Addons: { module: "addons.js", symbol: "AddonsEngine" },
+ Password: { module: "passwords.js", symbol: "PasswordEngine" },
+ Prefs: { module: "prefs.js", symbol: "PrefsEngine" },
+ };
+ if (AppConstants.MOZ_APP_NAME != "thunderbird") {
+ result.Bookmarks = { module: "bookmarks.js", symbol: "BookmarksEngine" };
+ result.Form = { module: "forms.js", symbol: "FormEngine" };
+ result.History = { module: "history.js", symbol: "HistoryEngine" };
+ result.Tab = { module: "tabs.js", symbol: "TabEngine" };
+ }
+ if (Svc.Prefs.get("engine.addresses.available", false)) {
+ result.Addresses = {
+ module: "resource://autofill/FormAutofillSync.jsm",
+ symbol: "AddressesEngine",
+ };
+ }
+ if (Svc.Prefs.get("engine.creditcards.available", false)) {
+ result.CreditCards = {
+ module: "resource://autofill/FormAutofillSync.jsm",
+ symbol: "CreditCardsEngine",
+ };
+ }
+ result["Extension-Storage"] = {
+ module: "extension-storage.js",
+ controllingPref: "webextensions.storage.sync.kinto",
+ whenTrue: "ExtensionStorageEngineKinto",
+ whenFalse: "ExtensionStorageEngineBridge",
+ };
+ return result;
+}
+
+const lazy = {};
+
+// A unique identifier for this browser session. Used for logging so
+// we can easily see whether 2 logs are in the same browser session or
+// after the browser restarted.
+XPCOMUtils.defineLazyGetter(lazy, "browserSessionID", Utils.makeGUID);
+
+function Sync11Service() {
+ this._notify = Utils.notify("weave:service:");
+ Utils.defineLazyIDProperty(this, "syncID", "services.sync.client.syncID");
+}
+Sync11Service.prototype = {
+ _lock: Utils.lock,
+ _locked: false,
+ _loggedIn: false,
+
+ infoURL: null,
+ storageURL: null,
+ metaURL: null,
+  cryptoKeysURL: null,
+ // The cluster URL comes via the identity object, which in the FxA
+  // world is embedded in the token returned from the token server.
+ _clusterURL: null,
+
+ get clusterURL() {
+ return this._clusterURL || "";
+ },
+ set clusterURL(value) {
+ if (value != null && typeof value != "string") {
+ throw new Error("cluster must be a string, got " + typeof value);
+ }
+ this._clusterURL = value;
+ this._updateCachedURLs();
+ },
+
+ get isLoggedIn() {
+ return this._loggedIn;
+ },
+
+ get locked() {
+ return this._locked;
+ },
+ lock: function lock() {
+ if (this._locked) {
+ return false;
+ }
+ this._locked = true;
+ return true;
+ },
+ unlock: function unlock() {
+ this._locked = false;
+ },
+
+ // A specialized variant of Utils.catch.
+ // This provides a more informative error message when we're already syncing:
+ // see Bug 616568.
+ _catch(func) {
+ function lockExceptions(ex) {
+ if (Utils.isLockException(ex)) {
+ // This only happens if we're syncing already.
+ this._log.info("Cannot start sync: already syncing?");
+ }
+ }
+
+ return Utils.catch.call(this, func, lockExceptions);
+ },
+
+ get userBaseURL() {
+ // The user URL is the cluster URL.
+ return this.clusterURL;
+ },
+
+ _updateCachedURLs: function _updateCachedURLs() {
+ // Nothing to cache yet if we don't have the building blocks
+ if (!this.clusterURL) {
+ // Also reset all other URLs used by Sync to ensure we aren't accidentally
+ // using one cached earlier - if there's no cluster URL any cached ones
+ // are invalid.
+ this.infoURL = undefined;
+ this.storageURL = undefined;
+ this.metaURL = undefined;
+ this.cryptoKeysURL = undefined;
+ return;
+ }
+
+ this._log.debug(
+ "Caching URLs under storage user base: " + this.userBaseURL
+ );
+
+ // Generate and cache various URLs under the storage API for this user
+ this.infoURL = this.userBaseURL + "info/collections";
+ this.storageURL = this.userBaseURL + "storage/";
+ this.metaURL = this.storageURL + "meta/global";
+ this.cryptoKeysURL = this.storageURL + CRYPTO_COLLECTION + "/" + KEYS_WBO;
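+
+    // For a hypothetical cluster URL "https://sync.example.com/1.5/12345/",
+    // the cached URLs become:
+    //   infoURL:       https://sync.example.com/1.5/12345/info/collections
+    //   storageURL:    https://sync.example.com/1.5/12345/storage/
+    //   metaURL:       https://sync.example.com/1.5/12345/storage/meta/global
+    //   cryptoKeysURL: https://sync.example.com/1.5/12345/storage/crypto/keys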
+ },
+
+ _checkCrypto: function _checkCrypto() {
+ let ok = false;
+
+ try {
+ let iv = Weave.Crypto.generateRandomIV();
+ if (iv.length == 24) {
+ ok = true;
+ }
+ } catch (e) {
+ this._log.debug("Crypto check failed: " + e);
+ }
+
+ return ok;
+ },
+
+ /**
+ * Here is a disgusting yet reasonable way of handling HMAC errors deep in
+ * the guts of Sync. The astute reader will note that this is a hacky way of
+ * implementing something like continuable conditions.
+ *
+ * A handler function is glued to each engine. If the engine discovers an
+ * HMAC failure, we fetch keys from the server and update our keys, just as
+ * we would on startup.
+ *
+ * If our key collection changed, we signal to the engine (via our return
+ * value) that it should retry decryption.
+ *
+ * If our key collection did not change, it means that we already had the
+ * correct keys... and thus a different client has the wrong ones. Reupload
+ * the bundle that we fetched, which will bump the modified time on the
+ * server and (we hope) prompt a broken client to fix itself.
+ *
+ * We keep track of the time at which we last applied this reasoning, because
+ * thrashing doesn't solve anything. We keep a reasonable interval between
+ * these remedial actions.
+ */
+ lastHMACEvent: 0,
+
+ /*
+ * Returns whether to try again.
+ */
+ async handleHMACEvent() {
+ let now = Date.now();
+
+ // Leave a sizable delay between HMAC recovery attempts. This gives us
+ // time for another client to fix themselves if we touch the record.
+ if (now - this.lastHMACEvent < HMAC_EVENT_INTERVAL) {
+ return false;
+ }
+
+ this._log.info(
+ "Bad HMAC event detected. Attempting recovery " +
+ "or signaling to other clients."
+ );
+
+ // Set the last handled time so that we don't act again.
+ this.lastHMACEvent = now;
+
+ // Fetch keys.
+ let cryptoKeys = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
+ try {
+ let cryptoResp = (
+ await cryptoKeys.fetch(this.resource(this.cryptoKeysURL))
+ ).response;
+
+ // Save out the ciphertext for when we reupload. If there's a bug in
+ // CollectionKeyManager, this will prevent us from uploading junk.
+ let cipherText = cryptoKeys.ciphertext;
+
+ if (!cryptoResp.success) {
+ this._log.warn("Failed to download keys.");
+ return false;
+ }
+
+ let keysChanged = await this.handleFetchedKeys(
+ this.identity.syncKeyBundle,
+ cryptoKeys,
+ true
+ );
+ if (keysChanged) {
+ // Did they change? If so, carry on.
+ this._log.info("Suggesting retry.");
+ return true; // Try again.
+ }
+
+ // If not, reupload them and continue the current sync.
+ cryptoKeys.ciphertext = cipherText;
+ cryptoKeys.cleartext = null;
+
+ let uploadResp = await this._uploadCryptoKeys(
+ cryptoKeys,
+ cryptoResp.obj.modified
+ );
+ if (uploadResp.success) {
+ this._log.info("Successfully re-uploaded keys. Continuing sync.");
+ } else {
+ this._log.warn(
+ "Got error response re-uploading keys. " +
+ "Continuing sync; let's try again later."
+ );
+ }
+
+ return false; // Don't try again: same keys.
+ } catch (ex) {
+ this._log.warn(
+ "Got exception fetching and handling crypto keys. " +
+ "Will try again later.",
+ ex
+ );
+ return false;
+ }
+ },
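+
+  // Illustrative caller-side pattern (hypothetical; engines wire this up via
+  // their own error handling):
+  //
+  //   if (Utils.isHMACMismatch(ex) && (await service.handleHMACEvent())) {
+  //     // Keys changed underneath us - retry decryption once.
+  //   }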
+
+ async handleFetchedKeys(syncKey, cryptoKeys, skipReset) {
+ // Don't want to wipe if we're just starting up!
+ let wasBlank = this.collectionKeys.isClear;
+ let keysChanged = await this.collectionKeys.updateContents(
+ syncKey,
+ cryptoKeys
+ );
+
+ if (keysChanged && !wasBlank) {
+ this._log.debug("Keys changed: " + JSON.stringify(keysChanged));
+
+ if (!skipReset) {
+ this._log.info("Resetting client to reflect key change.");
+
+ if (keysChanged.length) {
+ // Collection keys only. Reset individual engines.
+ await this.resetClient(keysChanged);
+ } else {
+ // Default key changed: wipe it all.
+ await this.resetClient();
+ }
+
+ this._log.info("Downloaded new keys, client reset. Proceeding.");
+ }
+ return true;
+ }
+ return false;
+ },
+
+ /**
+ * Prepare to initialize the rest of Weave after waiting a little bit
+ */
+ async onStartup() {
+ this.status = Status;
+ this.identity = Status._authManager;
+ this.collectionKeys = new CollectionKeyManager();
+
+ this.scheduler = new SyncScheduler(this);
+ this.errorHandler = new ErrorHandler(this);
+
+ this._log = Log.repository.getLogger("Sync.Service");
+ this._log.manageLevelFromPref("services.sync.log.logger.service.main");
+
+ this._log.info("Loading Weave " + WEAVE_VERSION);
+
+ this.recordManager = new RecordManager(this);
+
+ this.enabled = true;
+
+ await this._registerEngines();
+
+ let ua = Cc["@mozilla.org/network/protocol;1?name=http"].getService(
+ Ci.nsIHttpProtocolHandler
+ ).userAgent;
+ this._log.info(ua);
+
+ if (!this._checkCrypto()) {
+ this.enabled = false;
+ this._log.info(
+ "Could not load the Weave crypto component. Disabling " +
+ "Weave, since it will not work correctly."
+ );
+ }
+
+ Svc.Obs.add("weave:service:setup-complete", this);
+ Svc.Obs.add("sync:collection_changed", this); // Pulled from FxAccountsCommon
+ Svc.Obs.add("fxaccounts:device_disconnected", this);
+ Services.prefs.addObserver(PREFS_BRANCH + "engine.", this);
+
+ if (!this.enabled) {
+ this._log.info("Firefox Sync disabled.");
+ }
+
+ this._updateCachedURLs();
+
+ let status = this._checkSetup();
+ if (status != STATUS_DISABLED && status != CLIENT_NOT_CONFIGURED) {
+ this._startTracking();
+ }
+
+ // Send an event now that Weave service is ready. We don't do this
+ // synchronously so that observers can import this module before
+ // registering an observer.
+ CommonUtils.nextTick(() => {
+ this.status.ready = true;
+
+ // UI code uses the flag on the XPCOM service so it doesn't have
+ // to load a bunch of modules.
+ let xps = Cc["@mozilla.org/weave/service;1"].getService(
+ Ci.nsISupports
+ ).wrappedJSObject;
+ xps.ready = true;
+
+ Svc.Obs.notify("weave:service:ready");
+ });
+ },
+
+ _checkSetup: function _checkSetup() {
+ if (!this.enabled) {
+ return (this.status.service = STATUS_DISABLED);
+ }
+ return this.status.checkSetup();
+ },
+
+ /**
+ * Register the built-in engines for certain applications
+ */
+ async _registerEngines() {
+ this.engineManager = new EngineManager(this);
+
+ let engineModules = getEngineModules();
+
+ let engines = [];
+ // We allow a pref, which has no default value, to limit the engines
+ // which are registered. We expect only tests will use this.
+ if (Svc.Prefs.has("registerEngines")) {
+ engines = Svc.Prefs.get("registerEngines").split(",");
+ this._log.info("Registering custom set of engines", engines);
+ } else {
+ // default is all engines.
+ engines = Object.keys(engineModules);
+ }
+
+ let declined = [];
+ let pref = Svc.Prefs.get("declinedEngines");
+ if (pref) {
+ declined = pref.split(",");
+ }
+
+ let clientsEngine = new ClientEngine(this);
+ // Ideally clientsEngine should not exist
+ // (or be a promise that calls initialize() before returning the engine)
+ await clientsEngine.initialize();
+ this.clientsEngine = clientsEngine;
+
+ for (let name of engines) {
+ if (!(name in engineModules)) {
+ this._log.info("Do not know about engine: " + name);
+ continue;
+ }
+ let modInfo = engineModules[name];
+ if (!modInfo.module.includes(":")) {
+ modInfo.module = "resource://services-sync/engines/" + modInfo.module;
+ }
+ try {
+ let ns = ChromeUtils.import(modInfo.module);
+ if (modInfo.symbol) {
+ let symbol = modInfo.symbol;
+ if (!(symbol in ns)) {
+ this._log.warn(
+ "Could not find exported engine instance: " + symbol
+ );
+ continue;
+ }
+ await this.engineManager.register(ns[symbol]);
+ } else {
+ let { whenTrue, whenFalse, controllingPref } = modInfo;
+ if (!(whenTrue in ns) || !(whenFalse in ns)) {
+ this._log.warn("Could not find all exported engine instances", {
+ whenTrue,
+ whenFalse,
+ });
+ continue;
+ }
+ await this.engineManager.registerAlternatives(
+ name.toLowerCase(),
+ controllingPref,
+ ns[whenTrue],
+ ns[whenFalse]
+ );
+ }
+ } catch (ex) {
+ this._log.warn("Could not register engine " + name, ex);
+ }
+ }
+
+ this.engineManager.setDeclined(declined);
+ },
+
+ /**
+ * This method updates the local engines state from an existing meta/global
+ * when Sync is disabled.
+ * Running this code if sync is enabled would end up in very weird results
+ * (but we're nice and we check before doing anything!).
+ */
+ async updateLocalEnginesState() {
+ await this.promiseInitialized;
+
+ // Sanity check, this method is not meant to be run if Sync is enabled!
+ if (Svc.Prefs.get("username", "")) {
+ throw new Error("Sync is enabled!");
+ }
+
+ // For historical reasons the behaviour of setCluster() is bizarre,
+ // so just check what we care about - the meta URL.
+ if (!this.metaURL) {
+ await this.identity.setCluster();
+ if (!this.metaURL) {
+ this._log.warn("Could not find a cluster.");
+ return;
+ }
+ }
+ // Clear the cache so we always fetch the latest meta/global.
+ this.recordManager.clearCache();
+ let meta = await this.recordManager.get(this.metaURL);
+ if (!meta) {
+ this._log.info("Meta record is null, aborting engine state update.");
+ return;
+ }
+ const declinedEngines = meta.payload.declined;
+ const allEngines = this.engineManager.getAll().map(e => e.name);
+ // We don't want our observer of the enabled prefs to treat the change as
+ // a user-change, otherwise we will do the wrong thing with declined etc.
+ this._ignorePrefObserver = true;
+ try {
+ for (const engine of allEngines) {
+ Svc.Prefs.set(`engine.${engine}`, !declinedEngines.includes(engine));
+ }
+ } finally {
+ this._ignorePrefObserver = false;
+ }
+ },
+
+ QueryInterface: ChromeUtils.generateQI([
+ "nsIObserver",
+ "nsISupportsWeakReference",
+ ]),
+
+ observe(subject, topic, data) {
+ switch (topic) {
+ // Ideally this observer should be in the SyncScheduler, but it would require
+ // some work to know about the sync specific engines. We should move this there once it does.
+ case "sync:collection_changed":
+ // We check if we're running TPS here to avoid TPS failing because it
+        // couldn't get the sync lock, due to us currently syncing the
+ // clients engine.
+ if (data.includes("clients") && !Svc.Prefs.get("testing.tps", false)) {
+ // Sync in the background (it's fine not to wait on the returned promise
+ // because sync() has a lock).
+ // [] = clients collection only
+ this.sync({ why: "collection_changed", engines: [] }).catch(e => {
+ this._log.error(e);
+ });
+ }
+ break;
+ case "fxaccounts:device_disconnected":
+ data = JSON.parse(data);
+ if (!data.isLocalDevice) {
+ // Refresh the known stale clients list in the background.
+ this.clientsEngine.updateKnownStaleClients().catch(e => {
+ this._log.error(e);
+ });
+ }
+ break;
+ case "weave:service:setup-complete":
+ let status = this._checkSetup();
+ if (status != STATUS_DISABLED && status != CLIENT_NOT_CONFIGURED) {
+ this._startTracking();
+ }
+ break;
+ case "nsPref:changed":
+ if (this._ignorePrefObserver) {
+ return;
+ }
+ const engine = data.slice((PREFS_BRANCH + "engine.").length);
+ if (engine.includes(".")) {
+ // A sub-preference of the engine was changed. For example
+ // `services.sync.engine.bookmarks.validation.percentageChance`.
+ return;
+ }
+ this._handleEngineStatusChanged(engine);
+ break;
+ }
+ },
+
+ _handleEngineStatusChanged(engine) {
+ this._log.trace("Status for " + engine + " engine changed.");
+ if (Svc.Prefs.get("engineStatusChanged." + engine, false)) {
+      // The enabled status is being changed back to what it was before.
+ Svc.Prefs.reset("engineStatusChanged." + engine);
+ } else {
+ // Remember that the engine status changed locally until the next sync.
+ Svc.Prefs.set("engineStatusChanged." + engine, true);
+ }
+ },
+
+ _startTracking() {
+ const engines = [this.clientsEngine, ...this.engineManager.getAll()];
+ for (let engine of engines) {
+ try {
+ engine.startTracking();
+ } catch (e) {
+ this._log.error(`Could not start ${engine.name} engine tracker`, e);
+ }
+ }
+ // This is for TPS. We should try to do better.
+ Svc.Obs.notify("weave:service:tracking-started");
+ },
+
+ async _stopTracking() {
+ const engines = [this.clientsEngine, ...this.engineManager.getAll()];
+ for (let engine of engines) {
+ try {
+ await engine.stopTracking();
+ } catch (e) {
+ this._log.error(`Could not stop ${engine.name} engine tracker`, e);
+ }
+ }
+ Svc.Obs.notify("weave:service:tracking-stopped");
+ },
+
+ /**
+ * Obtain a Resource instance with authentication credentials.
+ */
+ resource: function resource(url) {
+ let res = new Resource(url);
+ res.authenticator = this.identity.getResourceAuthenticator();
+
+ return res;
+ },
+
+ /**
+ * Perform the info fetch as part of a login or key fetch, or
+ * inside engine sync.
+ */
+ async _fetchInfo(url) {
+ let infoURL = url || this.infoURL;
+
+ this._log.trace("In _fetchInfo: " + infoURL);
+ let info;
+ try {
+ info = await this.resource(infoURL).get();
+ } catch (ex) {
+ this.errorHandler.checkServerError(ex);
+ throw ex;
+ }
+
+ // Always check for errors.
+ this.errorHandler.checkServerError(info);
+ if (!info.success) {
+ this._log.error("Aborting sync: failed to get collections.");
+ throw info;
+ }
+ return info;
+ },
+
+ async verifyAndFetchSymmetricKeys(infoResponse) {
+ this._log.debug(
+ "Fetching and verifying -- or generating -- symmetric keys."
+ );
+
+ let syncKeyBundle = this.identity.syncKeyBundle;
+ if (!syncKeyBundle) {
+ this.status.login = LOGIN_FAILED_NO_PASSPHRASE;
+ this.status.sync = CREDENTIALS_CHANGED;
+ return false;
+ }
+
+ try {
+ if (!infoResponse) {
+ infoResponse = await this._fetchInfo(); // Will throw an exception on failure.
+ }
+
+ // This only applies when the server is already at version 4.
+ if (infoResponse.status != 200) {
+ this._log.warn(
+ "info/collections returned non-200 response. Failing key fetch."
+ );
+ this.status.login = LOGIN_FAILED_SERVER_ERROR;
+ this.errorHandler.checkServerError(infoResponse);
+ return false;
+ }
+
+ let infoCollections = infoResponse.obj;
+
+ this._log.info(
+ "Testing info/collections: " + JSON.stringify(infoCollections)
+ );
+
+ if (this.collectionKeys.updateNeeded(infoCollections)) {
+ this._log.info("collection keys reports that a key update is needed.");
+
+ // Don't always set to CREDENTIALS_CHANGED -- we will probably take care of this.
+
+ // Fetch storage/crypto/keys.
+ let cryptoKeys;
+
+ if (infoCollections && CRYPTO_COLLECTION in infoCollections) {
+ try {
+ cryptoKeys = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
+ let cryptoResp = (
+ await cryptoKeys.fetch(this.resource(this.cryptoKeysURL))
+ ).response;
+
+ if (cryptoResp.success) {
+ await this.handleFetchedKeys(syncKeyBundle, cryptoKeys);
+ return true;
+ } else if (cryptoResp.status == 404) {
+ // On failure, ask to generate new keys and upload them.
+ // Fall through to the behavior below.
+ this._log.warn(
+ "Got 404 for crypto/keys, but 'crypto' in info/collections. Regenerating."
+ );
+ cryptoKeys = null;
+ } else {
+ // Some other problem.
+ this.status.login = LOGIN_FAILED_SERVER_ERROR;
+ this.errorHandler.checkServerError(cryptoResp);
+ this._log.warn(
+ "Got status " + cryptoResp.status + " fetching crypto keys."
+ );
+ return false;
+ }
+ } catch (ex) {
+ this._log.warn("Got exception fetching cryptoKeys.", ex);
+ // TODO: Um, what exceptions might we get here? Should we re-throw any?
+
+ // One kind of exception: HMAC failure.
+ if (Utils.isHMACMismatch(ex)) {
+ this.status.login = LOGIN_FAILED_INVALID_PASSPHRASE;
+ this.status.sync = CREDENTIALS_CHANGED;
+ } else {
+ // In the absence of further disambiguation or more precise
+ // failure constants, just report failure.
+ this.status.login = LOGIN_FAILED;
+ }
+ return false;
+ }
+ } else {
+ this._log.info(
+ "... 'crypto' is not a reported collection. Generating new keys."
+ );
+ }
+
+ if (!cryptoKeys) {
+ this._log.info("No keys! Generating new ones.");
+
+ // Better make some and upload them, and wipe the server to ensure
+ // consistency. This is all achieved via _freshStart.
+ // If _freshStart fails to clear the server or upload keys, it will
+ // throw.
+ await this._freshStart();
+ return true;
+ }
+
+ // Last-ditch case.
+ return false;
+ }
+ // No update needed: we're good!
+ return true;
+ } catch (ex) {
+ // This means no keys are present, or there's a network error.
+ this._log.debug("Failed to fetch and verify keys", ex);
+ this.errorHandler.checkServerError(ex);
+ return false;
+ }
+ },
+
+ getMaxRecordPayloadSize() {
+ let config = this.serverConfiguration;
+ if (!config || !config.max_record_payload_bytes) {
+ this._log.warn(
+ "No config or incomplete config in getMaxRecordPayloadSize." +
+ " Are we running tests?"
+ );
+ return 256 * 1024;
+ }
+ let payloadMax = config.max_record_payload_bytes;
+ if (config.max_post_bytes && payloadMax <= config.max_post_bytes) {
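+      // The payload cap is at or below the POST cap, so the effective limit
+      // is the POST size less some headroom (4k here) for the non-payload
+      // parts of the record.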
+ return config.max_post_bytes - 4096;
+ }
+ return payloadMax;
+ },
+
+ getMemcacheMaxRecordPayloadSize() {
+ // Collections stored in memcached ("tabs", "clients" or "meta") have a
+ // different max size than ones stored in the normal storage server db.
+ // In practice, the real limit here is 1M (bug 1300451 comment 40), but
+ // there's overhead involved that is hard to calculate on the client, so we
+ // use 512k to be safe (at the recommendation of the server team). Note
+ // that if the server reports a lower limit (via info/configuration), we
+ // respect that limit instead. See also bug 1403052.
+ return Math.min(512 * 1024, this.getMaxRecordPayloadSize());
+ },
+
+ async verifyLogin(allow40XRecovery = true) {
+ // Attaching auth credentials to a request requires access to
+ // passwords, which means that Resource.get can throw MP-related
+ // exceptions!
+ // So we ask the identity to verify the login state after unlocking the
+ // master password (ie, this call is expected to prompt for MP unlock
+ // if necessary) while we still have control.
+ this.status.login = await this.identity.unlockAndVerifyAuthState();
+ this._log.debug(
+ "Fetching unlocked auth state returned " + this.status.login
+ );
+ if (this.status.login != STATUS_OK) {
+ return false;
+ }
+
+ try {
+ // Make sure we have a cluster to verify against.
+      // This is a little weird: if we don't get a node we pretend
+ // to succeed, since that probably means we just don't have storage.
+ if (this.clusterURL == "" && !(await this.identity.setCluster())) {
+ this.status.sync = NO_SYNC_NODE_FOUND;
+ return true;
+ }
+
+ // Fetch collection info on every startup.
+ let test = await this.resource(this.infoURL).get();
+
+ switch (test.status) {
+ case 200:
+ // The user is authenticated.
+
+ // We have no way of verifying the passphrase right now,
+ // so wait until remoteSetup to do so.
+ // Just make the most trivial checks.
+ if (!this.identity.syncKeyBundle) {
+ this._log.warn("No passphrase in verifyLogin.");
+ this.status.login = LOGIN_FAILED_NO_PASSPHRASE;
+ return false;
+ }
+
+ // Go ahead and do remote setup, so that we can determine
+ // conclusively that our passphrase is correct.
+ if (await this._remoteSetup(test)) {
+ // Username/password verified.
+ this.status.login = LOGIN_SUCCEEDED;
+ return true;
+ }
+
+ this._log.warn("Remote setup failed.");
+ // Remote setup must have failed.
+ return false;
+
+ case 401:
+ this._log.warn("401: login failed.");
+ // Fall through to the 404 case.
+
+ case 404:
+ // Check that we're verifying with the correct cluster
+ if (allow40XRecovery && (await this.identity.setCluster())) {
+ return await this.verifyLogin(false);
+ }
+
+ // We must have the right cluster, but the server doesn't expect us.
+ // For FxA this almost certainly means "transient error fetching token".
+ this.status.login = LOGIN_FAILED_NETWORK_ERROR;
+ return false;
+
+ default:
+ // Server didn't respond with something that we expected
+ this.status.login = LOGIN_FAILED_SERVER_ERROR;
+ this.errorHandler.checkServerError(test);
+ return false;
+ }
+ } catch (ex) {
+ // Must have failed on some network issue
+ this._log.debug("verifyLogin failed", ex);
+ this.status.login = LOGIN_FAILED_NETWORK_ERROR;
+ this.errorHandler.checkServerError(ex);
+ return false;
+ }
+ },
+
+ async generateNewSymmetricKeys() {
+ this._log.info("Generating new keys WBO...");
+ let wbo = await this.collectionKeys.generateNewKeysWBO();
+ this._log.info("Encrypting new key bundle.");
+ await wbo.encrypt(this.identity.syncKeyBundle);
+
+ let uploadRes = await this._uploadCryptoKeys(wbo, 0);
+ if (uploadRes.status != 200) {
+ this._log.warn(
+ "Got status " +
+ uploadRes.status +
+ " uploading new keys. What to do? Throw!"
+ );
+ this.errorHandler.checkServerError(uploadRes);
+ throw new Error("Unable to upload symmetric keys.");
+ }
+ this._log.info("Got status " + uploadRes.status + " uploading keys.");
+ let serverModified = uploadRes.obj; // Modified timestamp according to server.
+ this._log.debug("Server reports crypto modified: " + serverModified);
+
+ // Now verify that info/collections shows them!
+ this._log.debug("Verifying server collection records.");
+ let info = await this._fetchInfo();
+ this._log.debug("info/collections is: " + info.data);
+
+ if (info.status != 200) {
+ this._log.warn("Non-200 info/collections response. Aborting.");
+ throw new Error("Unable to upload symmetric keys.");
+ }
+
+ info = info.obj;
+ if (!(CRYPTO_COLLECTION in info)) {
+ this._log.error(
+ "Consistency failure: info/collections excludes " +
+ "crypto after successful upload."
+ );
+ throw new Error("Symmetric key upload failed.");
+ }
+
+ // Can't check against local modified: clock drift.
+ if (info[CRYPTO_COLLECTION] < serverModified) {
+ this._log.error(
+ "Consistency failure: info/collections crypto entry " +
+ "is stale after successful upload."
+ );
+ throw new Error("Symmetric key upload failed.");
+ }
+
+ // Doesn't matter if the timestamp is ahead.
+
+ // Download and install them.
+ let cryptoKeys = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
+ let cryptoResp = (await cryptoKeys.fetch(this.resource(this.cryptoKeysURL)))
+ .response;
+ if (cryptoResp.status != 200) {
+ this._log.warn("Failed to download keys.");
+ throw new Error("Symmetric key download failed.");
+ }
+ let keysChanged = await this.handleFetchedKeys(
+ this.identity.syncKeyBundle,
+ cryptoKeys,
+ true
+ );
+ if (keysChanged) {
+ this._log.info("Downloaded keys differed, as expected.");
+ }
+ },
+
+  // Configures/enables/turns on sync. There must be an FxA user signed in.
+ async configure() {
+ // We don't, and must not, throw if sync is already configured, because we
+ // might end up being called as part of a "reconnect" flow. We also want to
+ // avoid checking the FxA user is the same as the pref because the email
+ // address for the FxA account can change - we'd need to use the uid.
+ let user = await fxAccounts.getSignedInUser();
+ if (!user) {
+ throw new Error("No FxA user is signed in");
+ }
+ this._log.info("Configuring sync with current FxA user");
+ Svc.Prefs.set("username", user.email);
+ Svc.Obs.notify("weave:connected");
+ },
+
+  // Resets/turns off sync.
+ async startOver() {
+ this._log.trace("Invoking Service.startOver.");
+ await this._stopTracking();
+ this.status.resetSync();
+
+ // Deletion doesn't make sense if we aren't set up yet!
+ if (this.clusterURL != "") {
+ // Clear client-specific data from the server, including disabled engines.
+ const engines = [this.clientsEngine, ...this.engineManager.getAll()];
+ for (let engine of engines) {
+ try {
+ await engine.removeClientData();
+ } catch (ex) {
+ this._log.warn(`Deleting client data for ${engine.name} failed`, ex);
+ }
+ }
+ this._log.debug("Finished deleting client data.");
+ } else {
+ this._log.debug("Skipping client data removal: no cluster URL.");
+ }
+
+ this.identity.resetCredentials();
+ this.status.login = LOGIN_FAILED_NO_USERNAME;
+ this.logout();
+ Svc.Obs.notify("weave:service:start-over");
+
+ // Reset all engines and clear keys.
+ await this.resetClient();
+ this.collectionKeys.clear();
+ this.status.resetBackoff();
+
+ // Reset Weave prefs.
+ this._ignorePrefObserver = true;
+ Svc.Prefs.resetBranch("");
+ this._ignorePrefObserver = false;
+ this.clusterURL = null;
+
+ Svc.Prefs.set("lastversion", WEAVE_VERSION);
+
+ try {
+ this.identity.finalize();
+ this.status.__authManager = null;
+ this.identity = Status._authManager;
+ Svc.Obs.notify("weave:service:start-over:finish");
+ } catch (err) {
+ this._log.error(
+ "startOver failed to re-initialize the identity manager",
+ err
+ );
+ // Still send the observer notification so the current state is
+ // reflected in the UI.
+ Svc.Obs.notify("weave:service:start-over:finish");
+ }
+ },
+
+ async login() {
+ async function onNotify() {
+ this._loggedIn = false;
+ if (this.scheduler.offline) {
+ this.status.login = LOGIN_FAILED_NETWORK_ERROR;
+ throw new Error("Application is offline, login should not be called");
+ }
+
+ this._log.info("User logged in successfully - verifying login.");
+ if (!(await this.verifyLogin())) {
+ // verifyLogin sets the failure states here.
+ throw new Error(`Login failed: ${this.status.login}`);
+ }
+
+ this._updateCachedURLs();
+
+ this._loggedIn = true;
+
+ return true;
+ }
+
+ let notifier = this._notify("login", "", onNotify.bind(this));
+ return this._catch(this._lock("service.js: login", notifier))();
+ },
+
+ logout: function logout() {
+ // If we failed during login, we aren't going to have this._loggedIn set,
+ // but we still want to ask the identity to logout, so it doesn't try and
+ // reuse any old credentials next time we sync.
+ this._log.info("Logging out");
+ this.identity.logout();
+ this._loggedIn = false;
+
+ Svc.Obs.notify("weave:service:logout:finish");
+ },
+
+ // Note: returns false if we failed for a reason other than the server not yet
+ // supporting the api.
+ async _fetchServerConfiguration() {
+ // This is similar to _fetchInfo, but with different error handling.
+
+ let infoURL = this.userBaseURL + "info/configuration";
+ this._log.debug("Fetching server configuration", infoURL);
+ let configResponse;
+ try {
+ configResponse = await this.resource(infoURL).get();
+ } catch (ex) {
+ // This is probably a network or similar error.
+ this._log.warn("Failed to fetch info/configuration", ex);
+ this.errorHandler.checkServerError(ex);
+ return false;
+ }
+
+ if (configResponse.status == 404) {
+ // This server doesn't support the URL yet - that's OK.
+ this._log.debug(
+ "info/configuration returned 404 - using default upload semantics"
+ );
+ } else if (configResponse.status != 200) {
+ this._log.warn(
+ `info/configuration returned ${configResponse.status} - using default configuration`
+ );
+ this.errorHandler.checkServerError(configResponse);
+ return false;
+ } else {
+ this.serverConfiguration = configResponse.obj;
+ }
+ this._log.trace(
+ "info/configuration for this server",
+ this.serverConfiguration
+ );
+ return true;
+ },
+
+ // Stuff we need to do after login, before we can really do
+ // anything (e.g. key setup).
+ async _remoteSetup(infoResponse, fetchConfig = true) {
+ if (fetchConfig && !(await this._fetchServerConfiguration())) {
+ return false;
+ }
+
+ this._log.debug("Fetching global metadata record");
+ let meta = await this.recordManager.get(this.metaURL);
+
+ // Checking modified time of the meta record.
+ if (
+ infoResponse &&
+ infoResponse.obj.meta != this.metaModified &&
+ (!meta || !meta.isNew)
+ ) {
+ // Delete the cached meta record...
+ this._log.debug(
+ "Clearing cached meta record. metaModified is " +
+ JSON.stringify(this.metaModified) +
+ ", setting to " +
+ JSON.stringify(infoResponse.obj.meta)
+ );
+
+ this.recordManager.del(this.metaURL);
+
+ // ... fetch the current record from the server, and COPY THE FLAGS.
+ let newMeta = await this.recordManager.get(this.metaURL);
+
+ // If we got a 401, we do not want to create a new meta/global - we
+ // should be able to get the existing meta after we get a new node.
+ if (this.recordManager.response.status == 401) {
+ this._log.debug(
+ "Fetching meta/global record on the server returned 401."
+ );
+ this.errorHandler.checkServerError(this.recordManager.response);
+ return false;
+ }
+
+ if (this.recordManager.response.status == 404) {
+ this._log.debug("No meta/global record on the server. Creating one.");
+ try {
+ await this._uploadNewMetaGlobal();
+ } catch (uploadRes) {
+ this._log.warn(
+ "Unable to upload new meta/global. Failing remote setup."
+ );
+ this.errorHandler.checkServerError(uploadRes);
+ return false;
+ }
+ } else if (!newMeta) {
+ this._log.warn("Unable to get meta/global. Failing remote setup.");
+ this.errorHandler.checkServerError(this.recordManager.response);
+ return false;
+ } else {
+ // If newMeta, then it stands to reason that meta != null.
+ newMeta.isNew = meta.isNew;
+ newMeta.changed = meta.changed;
+ }
+
+ // Switch in the new meta object and record the new time.
+ meta = newMeta;
+ this.metaModified = infoResponse.obj.meta;
+ }
+
+ let remoteVersion =
+ meta && meta.payload.storageVersion ? meta.payload.storageVersion : "";
+
+ this._log.debug(
+ [
+ "Weave Version:",
+ WEAVE_VERSION,
+ "Local Storage:",
+ STORAGE_VERSION,
+ "Remote Storage:",
+ remoteVersion,
+ ].join(" ")
+ );
+
+ // Check for cases that require a fresh start. When comparing remoteVersion,
+ // we need to convert it to a number as older clients used it as a string.
+ if (
+ !meta ||
+ !meta.payload.storageVersion ||
+ !meta.payload.syncID ||
+ STORAGE_VERSION > parseFloat(remoteVersion)
+ ) {
+ this._log.info(
+ "One of: no meta, no meta storageVersion, or no meta syncID. Fresh start needed."
+ );
+
+ // abort the server wipe if the GET status was anything other than 404 or 200
+ let status = this.recordManager.response.status;
+ if (status != 200 && status != 404) {
+ this.status.sync = METARECORD_DOWNLOAD_FAIL;
+ this.errorHandler.checkServerError(this.recordManager.response);
+ this._log.warn(
+ "Unknown error while downloading metadata record. Aborting sync."
+ );
+ return false;
+ }
+
+ if (!meta) {
+ this._log.info("No metadata record, server wipe needed");
+ }
+ if (meta && !meta.payload.syncID) {
+ this._log.warn("No sync id, server wipe needed");
+ }
+
+ this._log.info("Wiping server data");
+ await this._freshStart();
+
+ if (status == 404) {
+ this._log.info(
+ "Metadata record not found, server was wiped to ensure " +
+ "consistency."
+ );
+ } else {
+ // 200
+ this._log.info("Wiped server; incompatible metadata: " + remoteVersion);
+ }
+ return true;
+ } else if (remoteVersion > STORAGE_VERSION) {
+ this.status.sync = VERSION_OUT_OF_DATE;
+ this._log.warn("Upgrade required to access newer storage version.");
+ return false;
+ } else if (meta.payload.syncID != this.syncID) {
+ this._log.info(
+ "Sync IDs differ. Local is " +
+ this.syncID +
+ ", remote is " +
+ meta.payload.syncID
+ );
+ await this.resetClient();
+ this.collectionKeys.clear();
+ this.syncID = meta.payload.syncID;
+ this._log.debug("Clear cached values and take syncId: " + this.syncID);
+
+ if (!(await this.verifyAndFetchSymmetricKeys(infoResponse))) {
+ this._log.warn("Failed to fetch symmetric keys. Failing remote setup.");
+ return false;
+ }
+
+ // bug 545725 - re-verify creds and fail sanely
+ if (!(await this.verifyLogin())) {
+ this.status.sync = CREDENTIALS_CHANGED;
+ this._log.info(
+ "Credentials have changed, aborting sync and forcing re-login."
+ );
+ return false;
+ }
+
+ return true;
+ }
+ if (!(await this.verifyAndFetchSymmetricKeys(infoResponse))) {
+ this._log.warn("Failed to fetch symmetric keys. Failing remote setup.");
+ return false;
+ }
+
+ return true;
+ },
+
+ /**
+ * Return whether we should attempt login at the start of a sync.
+ *
+ * Note that this function has strong ties to _checkSync: callers
+ * of this function should typically use _checkSync to verify that
+ * any necessary login took place.
+ */
+ _shouldLogin: function _shouldLogin() {
+ return (
+ this.enabled &&
+ !this.scheduler.offline &&
+ !this.isLoggedIn &&
+ Async.isAppReady()
+ );
+ },
+
+ /**
+ * Determine if a sync should run.
+ *
+ * @param ignore [optional]
+ * array of reasons to ignore when checking
+ *
+ * @return Reason for not syncing; not-truthy if sync should run
+ */
+ _checkSync: function _checkSync(ignore) {
+ let reason = "";
+ // Ideally we'd call _checkSetup() here but that has too many side-effects.
+ if (Status.service == CLIENT_NOT_CONFIGURED) {
+ reason = kSyncNotConfigured;
+ } else if (Status.service == STATUS_DISABLED || !this.enabled) {
+ reason = kSyncWeaveDisabled;
+ } else if (this.scheduler.offline) {
+ reason = kSyncNetworkOffline;
+ } else if (this.status.minimumNextSync > Date.now()) {
+ reason = kSyncBackoffNotMet;
+ } else if (
+ this.status.login == MASTER_PASSWORD_LOCKED &&
+ Utils.mpLocked()
+ ) {
+ reason = kSyncMasterPasswordLocked;
+ } else if (Svc.Prefs.get("firstSync") == "notReady") {
+ reason = kFirstSyncChoiceNotMade;
+ } else if (!Async.isAppReady()) {
+ reason = kFirefoxShuttingDown;
+ }
+
+ if (ignore && ignore.includes(reason)) {
+ return "";
+ }
+
+ return reason;
+ },
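+
+  // Illustrative (hypothetical caller): a scheduler that tolerates backoff
+  // could pass `[kSyncBackoffNotMet]` as `ignore` and treat any other truthy
+  // reason as "don't sync".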
+
+ async sync({ engines, why } = {}) {
+ let dateStr = Utils.formatTimestamp(new Date());
+ this._log.debug("User-Agent: " + Utils.userAgent);
+ await this.promiseInitialized;
+ this._log.info(
+ `Starting sync at ${dateStr} in browser session ${lazy.browserSessionID}`
+ );
+ return this._catch(async function () {
+ // Make sure we're logged in.
+ if (this._shouldLogin()) {
+ this._log.debug("In sync: should login.");
+ if (!(await this.login())) {
+ this._log.debug("Not syncing: login returned false.");
+ return;
+ }
+ } else {
+ this._log.trace("In sync: no need to login.");
+ }
+ await this._lockedSync(engines, why);
+ })();
+ },
+
+ /**
+ * Sync up engines with the server.
+ */
+ async _lockedSync(engineNamesToSync, why) {
+ return this._lock(
+ "service.js: sync",
+ this._notify("sync", JSON.stringify({ why }), async function onNotify() {
+ let histogram =
+ Services.telemetry.getHistogramById("WEAVE_START_COUNT");
+ histogram.add(1);
+
+ let synchronizer = new EngineSynchronizer(this);
+ await synchronizer.sync(engineNamesToSync, why); // Might throw!
+
+ histogram = Services.telemetry.getHistogramById(
+ "WEAVE_COMPLETE_SUCCESS_COUNT"
+ );
+ histogram.add(1);
+
+ // We successfully synchronized.
+ // Check if the identity wants to pre-fetch a migration sentinel from
+ // the server.
+ // If we have no clusterURL, we are probably doing a node reassignment
+ // so don't attempt to get it in that case.
+ if (this.clusterURL) {
+ this.identity.prefetchMigrationSentinel(this);
+ }
+
+ // Now let's update our declined engines
+ await this._maybeUpdateDeclined();
+ })
+ )();
+ },
+
+ /**
+ * Update the "declined" information in meta/global if necessary.
+ */
+ async _maybeUpdateDeclined() {
+ // if Sync failed due to no node we will not have a meta URL, so can't
+ // update anything.
+ if (!this.metaURL) {
+ return;
+ }
+ let meta = await this.recordManager.get(this.metaURL);
+ if (!meta) {
+ this._log.warn("No meta/global; can't update declined state.");
+ return;
+ }
+
+ let declinedEngines = new DeclinedEngines(this);
+ let didChange = declinedEngines.updateDeclined(meta, this.engineManager);
+ if (!didChange) {
+ this._log.info(
+ "No change to declined engines. Not reuploading meta/global."
+ );
+ return;
+ }
+
+ await this.uploadMetaGlobal(meta);
+ },
+
+ /**
+ * Upload a fresh meta/global record
+ * @throws the response object if the upload request was not a success
+ */
+ async _uploadNewMetaGlobal() {
+ let meta = new WBORecord("meta", "global");
+ meta.payload.syncID = this.syncID;
+ meta.payload.storageVersion = STORAGE_VERSION;
+ meta.payload.declined = this.engineManager.getDeclined();
+ meta.modified = 0;
+ meta.isNew = true;
+
+ await this.uploadMetaGlobal(meta);
+ },
+
+ /**
+ * Upload meta/global, throwing the response on failure
+ * @param {WBORecord} meta meta/global record
+ * @throws the response object if the request was not a success
+ */
+ async uploadMetaGlobal(meta) {
+ this._log.debug("Uploading meta/global", meta);
+ let res = this.resource(this.metaURL);
+ res.setHeader("X-If-Unmodified-Since", meta.modified);
+ let response = await res.put(meta);
+ if (!response.success) {
+ throw response;
+ }
+ // From https://docs.services.mozilla.com/storage/apis-1.5.html:
+ // "Successful responses will return the new last-modified time for the collection."
+ meta.modified = response.obj;
+ this.recordManager.set(this.metaURL, meta);
+ },
+
+ /**
+ * Upload crypto/keys
+ * @param {WBORecord} cryptoKeys crypto/keys record
+ * @param {Number} lastModified known last modified timestamp (in decimal seconds),
+ * will be used to set the X-If-Unmodified-Since header
+ */
+ async _uploadCryptoKeys(cryptoKeys, lastModified) {
+ this._log.debug(`Uploading crypto/keys (lastModified: ${lastModified})`);
+ let res = this.resource(this.cryptoKeysURL);
+ res.setHeader("X-If-Unmodified-Since", lastModified);
+ return res.put(cryptoKeys);
+ },
+
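+  /**
+   * Start over from scratch: reset the client, wipe the server, upload a
+   * fresh meta/global, and generate new symmetric keys.
+   */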
+ async _freshStart() {
+ this._log.info("Fresh start. Resetting client.");
+ await this.resetClient();
+ this.collectionKeys.clear();
+
+ // Wipe the server.
+ await this.wipeServer();
+
+ // Upload a new meta/global record.
+ // _uploadNewMetaGlobal throws on failure -- including race conditions.
+ // If we got into a race condition, we'll abort the sync this way, too.
+ // That's fine. We'll just wait till the next sync. The client that we're
+ // racing is probably busy uploading stuff right now anyway.
+ await this._uploadNewMetaGlobal();
+
+    // Wipe everything we know about except meta, because we just uploaded it.
+ // TODO: there's a bug here. We should be calling resetClient, no?
+
+ // Generate, upload, and download new keys. Do this last so we don't wipe
+ // them...
+ await this.generateNewSymmetricKeys();
+ },
+
+ /**
+ * Wipe user data from the server.
+ *
+ * @param collections [optional]
+ * Array of collections to wipe. If not given, all collections are
+ * wiped by issuing a DELETE request for `storageURL`.
+ *
+ * @return the server's timestamp of the (last) DELETE.
+ */
+ async wipeServer(collections) {
+ let response;
+ let histogram = Services.telemetry.getHistogramById(
+ "WEAVE_WIPE_SERVER_SUCCEEDED"
+ );
+ if (!collections) {
+ // Strip the trailing slash.
+ let res = this.resource(this.storageURL.slice(0, -1));
+ res.setHeader("X-Confirm-Delete", "1");
+ try {
+ response = await res.delete();
+ } catch (ex) {
+ this._log.debug("Failed to wipe server", ex);
+ histogram.add(false);
+ throw ex;
+ }
+ if (response.status != 200 && response.status != 404) {
+ this._log.debug(
+ "Aborting wipeServer. Server responded with " +
+ response.status +
+ " response for " +
+ this.storageURL
+ );
+ histogram.add(false);
+ throw response;
+ }
+ histogram.add(true);
+ return response.headers["x-weave-timestamp"];
+ }
+
+ let timestamp;
+ for (let name of collections) {
+ let url = this.storageURL + name;
+ try {
+ response = await this.resource(url).delete();
+ } catch (ex) {
+ this._log.debug("Failed to wipe '" + name + "' collection", ex);
+ histogram.add(false);
+ throw ex;
+ }
+
+ if (response.status != 200 && response.status != 404) {
+ this._log.debug(
+ "Aborting wipeServer. Server responded with " +
+ response.status +
+ " response for " +
+ url
+ );
+ histogram.add(false);
+ throw response;
+ }
+
+ if ("x-weave-timestamp" in response.headers) {
+ timestamp = response.headers["x-weave-timestamp"];
+ }
+ }
+ histogram.add(true);
+ return timestamp;
+ },
+
+ /**
+ * Wipe all local user data.
+ *
+ * @param engines [optional]
+ * Array of engine names to wipe. If not given, all engines are used.
+ */
+ async wipeClient(engines) {
+ // If we don't have any engines, reset the service and wipe all engines
+ if (!engines) {
+ // Clear out any service data
+ await this.resetService();
+
+ engines = [this.clientsEngine, ...this.engineManager.getAll()];
+ } else {
+ // Convert the array of names into engines
+ engines = this.engineManager.get(engines);
+ }
+
+ // Fully wipe each engine if it's able to decrypt data
+ for (let engine of engines) {
+ if (await engine.canDecrypt()) {
+ await engine.wipeClient();
+ }
+ }
+ },
+
+ /**
+ * Wipe all remote user data by wiping the server then telling each remote
+ * client to wipe itself.
+ *
+ * @param engines
+ * Array of engine names to wipe.
+ */
+ async wipeRemote(engines) {
+ try {
+      // Reset local engine state so all data is re-uploaded after the wipe.
+ await this.resetClient(engines);
+
+ // Clear out any server data.
+ await this.wipeServer(engines);
+
+ // Only wipe the engines provided.
+ let extra = { reason: "wipe-remote" };
+ for (const e of engines) {
+ await this.clientsEngine.sendCommand("wipeEngine", [e], null, extra);
+ }
+
+ // Make sure the changed clients get updated.
+ await this.clientsEngine.sync();
+ } catch (ex) {
+ this.errorHandler.checkServerError(ex);
+ throw ex;
+ }
+ },
+
+ /**
+ * Reset local service information like logs, sync times, caches.
+ */
+ async resetService() {
+ return this._catch(async function reset() {
+ this._log.info("Service reset.");
+
+ // Pretend we've never synced to the server and drop cached data
+ this.syncID = "";
+ this.recordManager.clearCache();
+ })();
+ },
+
+ /**
+ * Reset the client by getting rid of any local server data and client data.
+ *
+ * @param engines [optional]
+ * Array of engine names to reset. If not given, all engines are used.
+ */
+ async resetClient(engines) {
+ return this._catch(async function doResetClient() {
+ // If we don't have any engines, reset everything including the service
+ if (!engines) {
+ // Clear out any service data
+ await this.resetService();
+
+ engines = [this.clientsEngine, ...this.engineManager.getAll()];
+ } else {
+ // Convert the array of names into engines
+ engines = this.engineManager.get(engines);
+ }
+
+ // Have each engine drop any temporary meta data
+ for (let engine of engines) {
+ await engine.resetClient();
+ }
+ })();
+ },
+
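+  /**
+   * Emit an ad-hoc telemetry event via the "weave:telemetry:event"
+   * notification, which the sync telemetry module observes and records.
+   */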
+ recordTelemetryEvent(object, method, value, extra = undefined) {
+ Svc.Obs.notify("weave:telemetry:event", { object, method, value, extra });
+ },
+};
+
+export var Service = new Sync11Service();
+Service.promiseInitialized = new Promise(resolve => {
+ Service.onStartup().then(resolve);
+});
diff --git a/services/sync/modules/stages/declined.sys.mjs b/services/sync/modules/stages/declined.sys.mjs
new file mode 100644
index 0000000000..2c74aab117
--- /dev/null
+++ b/services/sync/modules/stages/declined.sys.mjs
@@ -0,0 +1,78 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * This file contains code for maintaining the set of declined engines,
+ * in conjunction with EngineManager.
+ */
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+import { Observers } from "resource://services-common/observers.sys.mjs";
+
+export var DeclinedEngines = function (service) {
+ this._log = Log.repository.getLogger("Sync.Declined");
+ this._log.manageLevelFromPref("services.sync.log.logger.declined");
+
+ this.service = service;
+};
+
+DeclinedEngines.prototype = {
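+  // Merge the remote declined list from meta/global with local engine state,
+  // updating the engine manager and (if the set changed) marking the meta
+  // record for reupload. Returns true if the declined set changed.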
+ updateDeclined(meta, engineManager = this.service.engineManager) {
+ let enabled = new Set(engineManager.getEnabled().map(e => e.name));
+ let known = new Set(engineManager.getAll().map(e => e.name));
+ let remoteDeclined = new Set(meta.payload.declined || []);
+ let localDeclined = new Set(engineManager.getDeclined());
+
+ this._log.debug(
+ "Handling remote declined: " + JSON.stringify([...remoteDeclined])
+ );
+ this._log.debug(
+ "Handling local declined: " + JSON.stringify([...localDeclined])
+ );
+
+ // Any engines that are locally enabled should be removed from the remote
+ // declined list.
+ //
+ // Any engines that are locally declined should be added to the remote
+ // declined list.
+ let newDeclined = CommonUtils.union(
+ localDeclined,
+ CommonUtils.difference(remoteDeclined, enabled)
+ );
+
+ // If our declined set has changed, put it into the meta object and mark
+ // it as changed.
+ let declinedChanged = !CommonUtils.setEqual(newDeclined, remoteDeclined);
+ this._log.debug("Declined changed? " + declinedChanged);
+ if (declinedChanged) {
+ meta.changed = true;
+ meta.payload.declined = [...newDeclined];
+ }
+
+ // Update the engine manager regardless.
+ engineManager.setDeclined(newDeclined);
+
+ // Any engines that are locally known, locally disabled, and not remotely
+ // or locally declined, are candidates for enablement.
+ let undecided = CommonUtils.difference(
+ CommonUtils.difference(known, enabled),
+ newDeclined
+ );
+ if (undecided.size) {
+ let subject = {
+ declined: newDeclined,
+ enabled,
+ known,
+ undecided,
+ };
+ CommonUtils.nextTick(() => {
+ Observers.notify("weave:engines:notdeclined", subject);
+ });
+ }
+
+ return declinedChanged;
+ },
+};
diff --git a/services/sync/modules/stages/enginesync.sys.mjs b/services/sync/modules/stages/enginesync.sys.mjs
new file mode 100644
index 0000000000..6078a3af0e
--- /dev/null
+++ b/services/sync/modules/stages/enginesync.sys.mjs
@@ -0,0 +1,400 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * This file contains code for synchronizing engines.
+ */
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import {
+ ABORT_SYNC_COMMAND,
+ LOGIN_FAILED_NETWORK_ERROR,
+ NO_SYNC_NODE_FOUND,
+ STATUS_OK,
+ SYNC_FAILED_PARTIAL,
+ SYNC_SUCCEEDED,
+ WEAVE_VERSION,
+ kSyncNetworkOffline,
+} from "resource://services-sync/constants.sys.mjs";
+
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+
+const lazy = {};
+ChromeUtils.defineESModuleGetters(lazy, {
+ Doctor: "resource://services-sync/doctor.sys.mjs",
+});
+
+/**
+ * Perform synchronization of engines.
+ *
+ * This was originally split out of service.js. The API needs lots of love.
+ */
+export function EngineSynchronizer(service) {
+ this._log = Log.repository.getLogger("Sync.Synchronizer");
+ this._log.manageLevelFromPref("services.sync.log.logger.synchronizer");
+
+ this.service = service;
+}
+
+EngineSynchronizer.prototype = {
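+  // Sync the clients engine and then each enabled engine in turn.
+  // `engineNamesToSync` is an optional hint honored only after the first
+  // sync; a `why` of "sleep" triggers an abbreviated "fast" sync.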
+ async sync(engineNamesToSync, why) {
+ let fastSync = why && why == "sleep";
+ let startTime = Date.now();
+
+ this.service.status.resetSync();
+
+ // Make sure we should sync or record why we shouldn't.
+ let reason = this.service._checkSync();
+ if (reason) {
+ if (reason == kSyncNetworkOffline) {
+ this.service.status.sync = LOGIN_FAILED_NETWORK_ERROR;
+ }
+
+ // this is a purposeful abort rather than a failure, so don't set
+ // any status bits
+ reason = "Can't sync: " + reason;
+ throw new Error(reason);
+ }
+
+ // If we don't have a node, get one. If that fails, retry in 10 minutes.
+ if (
+ !this.service.clusterURL &&
+ !(await this.service.identity.setCluster())
+ ) {
+ this.service.status.sync = NO_SYNC_NODE_FOUND;
+ this._log.info("No cluster URL found. Cannot sync.");
+ return;
+ }
+
+ // Ping the server with a special info request once a day.
+ let infoURL = this.service.infoURL;
+ let now = Math.floor(Date.now() / 1000);
+ let lastPing = Svc.Prefs.get("lastPing", 0);
+ if (now - lastPing > 86400) {
+ // 60 * 60 * 24
+ infoURL += "?v=" + WEAVE_VERSION;
+ Svc.Prefs.set("lastPing", now);
+ }
+
+ let engineManager = this.service.engineManager;
+
+ // Figure out what the last modified time is for each collection
+ let info = await this.service._fetchInfo(infoURL);
+
+ // Convert the response to an object and read out the modified times
+ for (let engine of [this.service.clientsEngine].concat(
+ engineManager.getAll()
+ )) {
+ engine.lastModified = info.obj[engine.name] || 0;
+ }
+
+ if (!(await this.service._remoteSetup(info, !fastSync))) {
+ throw new Error("Aborting sync, remote setup failed");
+ }
+
+ if (!fastSync) {
+ // Make sure we have an up-to-date list of clients before sending commands
+ this._log.debug("Refreshing client list.");
+ if (!(await this._syncEngine(this.service.clientsEngine))) {
+ // Clients is an engine like any other; it can fail with a 401,
+ // and we can elect to abort the sync.
+ this._log.warn("Client engine sync failed. Aborting.");
+ return;
+ }
+ }
+
+ // We only honor the "hint" of what engines to Sync if this isn't
+ // a first sync.
+ let allowEnginesHint = false;
+ // Wipe data in the desired direction if necessary
+ switch (Svc.Prefs.get("firstSync")) {
+ case "resetClient":
+ await this.service.resetClient(engineManager.enabledEngineNames);
+ break;
+ case "wipeClient":
+ await this.service.wipeClient(engineManager.enabledEngineNames);
+ break;
+ case "wipeRemote":
+ await this.service.wipeRemote(engineManager.enabledEngineNames);
+ break;
+ default:
+ allowEnginesHint = true;
+ break;
+ }
+
+ if (!fastSync && this.service.clientsEngine.localCommands) {
+ try {
+ if (!(await this.service.clientsEngine.processIncomingCommands())) {
+ this.service.status.sync = ABORT_SYNC_COMMAND;
+ throw new Error("Processed command aborted sync.");
+ }
+
+        // Repeat remoteSetup in case the commands forced us to reset
+ if (!(await this.service._remoteSetup(info))) {
+ throw new Error("Remote setup failed after processing commands.");
+ }
+ } finally {
+ // Always immediately attempt to push back the local client (now
+ // without commands).
+ // Note that we don't abort here; if there's a 401 because we've
+ // been reassigned, we'll handle it around another engine.
+ await this._syncEngine(this.service.clientsEngine);
+ }
+ }
+
+ // Update engines because it might change what we sync.
+ try {
+ await this._updateEnabledEngines();
+ } catch (ex) {
+ this._log.debug("Updating enabled engines failed", ex);
+ this.service.errorHandler.checkServerError(ex);
+ throw ex;
+ }
+
+ await this.service.engineManager.switchAlternatives();
+
+    // If the engines to sync have been specified, sync them in the order given.
+ let enginesToSync;
+ if (allowEnginesHint && engineNamesToSync) {
+ this._log.info("Syncing specified engines", engineNamesToSync);
+ enginesToSync = engineManager
+ .get(engineNamesToSync)
+ .filter(e => e.enabled);
+ } else {
+ this._log.info("Syncing all enabled engines.");
+ enginesToSync = engineManager.getEnabled();
+ }
+ try {
+ // We don't bother validating engines that failed to sync.
+ let enginesToValidate = [];
+ for (let engine of enginesToSync) {
+ if (engine.shouldSkipSync(why)) {
+ this._log.info(`Engine ${engine.name} asked to be skipped`);
+ continue;
+ }
+ // If there's any problems with syncing the engine, report the failure
+ if (
+ !(await this._syncEngine(engine)) ||
+ this.service.status.enforceBackoff
+ ) {
+ this._log.info("Aborting sync for failure in " + engine.name);
+ break;
+ }
+ enginesToValidate.push(engine);
+ }
+
+ // If _syncEngine fails for a 401, we might not have a cluster URL here.
+ // If that's the case, break out of this immediately, rather than
+ // throwing an exception when trying to fetch metaURL.
+ if (!this.service.clusterURL) {
+ this._log.debug(
+ "Aborting sync, no cluster URL: not uploading new meta/global."
+ );
+ return;
+ }
+
+ // Upload meta/global if any engines changed anything.
+ let meta = await this.service.recordManager.get(this.service.metaURL);
+ if (meta.isNew || meta.changed) {
+ this._log.info("meta/global changed locally: reuploading.");
+ try {
+ await this.service.uploadMetaGlobal(meta);
+ delete meta.isNew;
+ delete meta.changed;
+ } catch (error) {
+ this._log.error(
+ "Unable to upload meta/global. Leaving marked as new."
+ );
+ }
+ }
+
+ if (!fastSync) {
+ await lazy.Doctor.consult(enginesToValidate);
+ }
+
+ // If there were no sync engine failures
+ if (this.service.status.service != SYNC_FAILED_PARTIAL) {
+ this.service.status.sync = SYNC_SUCCEEDED;
+ }
+
+ // Even if there were engine failures, bump lastSync even on partial since
+ // it's reflected in the UI (bug 1439777).
+ if (
+ this.service.status.service == SYNC_FAILED_PARTIAL ||
+ this.service.status.service == STATUS_OK
+ ) {
+ Svc.Prefs.set("lastSync", new Date().toString());
+ }
+ } finally {
+ Svc.Prefs.reset("firstSync");
+
+ let syncTime = ((Date.now() - startTime) / 1000).toFixed(2);
+ let dateStr = Utils.formatTimestamp(new Date());
+ this._log.info(
+ "Sync completed at " + dateStr + " after " + syncTime + " secs."
+ );
+ }
+ },
+
+ // Returns true if sync should proceed.
+ // false / no return value means sync should be aborted.
+ async _syncEngine(engine) {
+ try {
+ await engine.sync();
+ } catch (e) {
+ if (e.status == 401) {
+ // Maybe a 401, cluster update perhaps needed?
+ // We rely on ErrorHandler observing the sync failure notification to
+ // schedule another sync and clear node assignment values.
+ // Here we simply want to muffle the exception and return an
+ // appropriate value.
+ return false;
+ }
+ // Note that policies.js has already logged info about the exception...
+ if (Async.isShutdownException(e)) {
+ // Failure due to a shutdown exception should prevent other engines
+ // trying to start and immediately failing.
+ this._log.info(
+ `${engine.name} was interrupted by shutdown; no other engines will sync`
+ );
+ return false;
+ }
+ }
+
+ return true;
+ },
+
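+  // Reconcile locally enabled engines against the engine state recorded in
+  // meta/global: enable engines that were enabled remotely, wipe server data
+  // for engines disabled locally, and update the declined lists to match.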
+ async _updateEnabledFromMeta(
+ meta,
+ numClients,
+ engineManager = this.service.engineManager
+ ) {
+ this._log.info("Updating enabled engines: " + numClients + " clients.");
+
+ if (meta.isNew || !meta.payload.engines) {
+ this._log.debug(
+ "meta/global isn't new, or is missing engines. Not updating enabled state."
+ );
+ return;
+ }
+
+ // If we're the only client, and no engines are marked as enabled,
+ // thumb our noses at the server data: it can't be right.
+ // Belt-and-suspenders approach to Bug 615926.
+ let hasEnabledEngines = false;
+ for (let e in meta.payload.engines) {
+ if (e != "clients") {
+ hasEnabledEngines = true;
+ break;
+ }
+ }
+
+ if (numClients <= 1 && !hasEnabledEngines) {
+ this._log.info(
+ "One client and no enabled engines: not touching local engine status."
+ );
+ return;
+ }
+
+ this.service._ignorePrefObserver = true;
+
+ let enabled = engineManager.enabledEngineNames;
+
+ let toDecline = new Set();
+ let toUndecline = new Set();
+
+ for (let engineName in meta.payload.engines) {
+ if (engineName == "clients") {
+ // Clients is special.
+ continue;
+ }
+ let index = enabled.indexOf(engineName);
+ if (index != -1) {
+ // The engine is enabled locally. Nothing to do.
+ enabled.splice(index, 1);
+ continue;
+ }
+ let engine = engineManager.get(engineName);
+ if (!engine) {
+ // The engine doesn't exist locally. Nothing to do.
+ continue;
+ }
+
+ let attemptedEnable = false;
+ // If the engine was enabled remotely, enable it locally.
+ if (!Svc.Prefs.get("engineStatusChanged." + engine.prefName, false)) {
+ this._log.trace(
+ "Engine " + engineName + " was enabled. Marking as non-declined."
+ );
+ toUndecline.add(engineName);
+ this._log.trace(engineName + " engine was enabled remotely.");
+ engine.enabled = true;
+      // Note that setting engine.enabled to true might not have worked for
+      // the password engine if a master-password is enabled. However, it's
+      // still OK that we added it to undeclined - the user *tried* to enable
+      // it remotely - so it still winds up as not being flagged as declined
+      // even though it remains disabled locally.
+ attemptedEnable = true;
+ }
+
+ // If either the engine was disabled locally or enabling the engine
+ // failed (see above re master-password) then wipe server data and
+ // disable it everywhere.
+ if (!engine.enabled) {
+ this._log.trace("Wiping data for " + engineName + " engine.");
+ await engine.wipeServer();
+ delete meta.payload.engines[engineName];
+ meta.changed = true; // the new enabled state must propagate
+ // We also here mark the engine as declined, because the pref
+ // was explicitly changed to false - unless we tried, and failed,
+ // to enable it - in which case we leave the declined state alone.
+ if (!attemptedEnable) {
+ // This will be reflected in meta/global in the next stage.
+ this._log.trace(
+ "Engine " +
+ engineName +
+ " was disabled locally. Marking as declined."
+ );
+ toDecline.add(engineName);
+ }
+ }
+ }
+
+ // Any remaining engines were either enabled locally or disabled remotely.
+ for (let engineName of enabled) {
+ let engine = engineManager.get(engineName);
+ if (Svc.Prefs.get("engineStatusChanged." + engine.prefName, false)) {
+ this._log.trace("The " + engineName + " engine was enabled locally.");
+ toUndecline.add(engineName);
+ } else {
+ this._log.trace("The " + engineName + " engine was disabled remotely.");
+
+ // Don't automatically mark it as declined!
+ try {
+ engine.enabled = false;
+ } catch (e) {
+ this._log.trace("Failed to disable engine " + engineName);
+ }
+ }
+ }
+
+ engineManager.decline(toDecline);
+ engineManager.undecline(toUndecline);
+
+ Svc.Prefs.resetBranch("engineStatusChanged.");
+ this.service._ignorePrefObserver = false;
+ },
+
+ async _updateEnabledEngines() {
+ let meta = await this.service.recordManager.get(this.service.metaURL);
+ let numClients = this.service.scheduler.numClients;
+ let engineManager = this.service.engineManager;
+
+ await this._updateEnabledFromMeta(meta, numClients, engineManager);
+ },
+};
+Object.freeze(EngineSynchronizer.prototype);
diff --git a/services/sync/modules/status.sys.mjs b/services/sync/modules/status.sys.mjs
new file mode 100644
index 0000000000..429dbda7b6
--- /dev/null
+++ b/services/sync/modules/status.sys.mjs
@@ -0,0 +1,135 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import {
+ CLIENT_NOT_CONFIGURED,
+ ENGINE_SUCCEEDED,
+ LOGIN_FAILED,
+ LOGIN_FAILED_NO_PASSPHRASE,
+ LOGIN_FAILED_NO_USERNAME,
+ LOGIN_SUCCEEDED,
+ STATUS_OK,
+ SYNC_FAILED,
+ SYNC_FAILED_PARTIAL,
+ SYNC_SUCCEEDED,
+} from "resource://services-sync/constants.sys.mjs";
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { SyncAuthManager } from "resource://services-sync/sync_auth.sys.mjs";
+
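+// Singleton tracking the overall state of the Sync service: the service,
+// login and sync status codes, per-engine results, and backoff hints.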
+export var Status = {
+ _log: Log.repository.getLogger("Sync.Status"),
+ __authManager: null,
+ ready: false,
+
+ get _authManager() {
+ if (this.__authManager) {
+ return this.__authManager;
+ }
+ this.__authManager = new SyncAuthManager();
+ return this.__authManager;
+ },
+
+ get service() {
+ return this._service;
+ },
+
+ set service(code) {
+ this._log.debug(
+ "Status.service: " + (this._service || undefined) + " => " + code
+ );
+ this._service = code;
+ },
+
+ get login() {
+ return this._login;
+ },
+
+ set login(code) {
+ this._log.debug("Status.login: " + this._login + " => " + code);
+ this._login = code;
+
+ if (
+ code == LOGIN_FAILED_NO_USERNAME ||
+ code == LOGIN_FAILED_NO_PASSPHRASE
+ ) {
+ this.service = CLIENT_NOT_CONFIGURED;
+ } else if (code != LOGIN_SUCCEEDED) {
+ this.service = LOGIN_FAILED;
+ } else {
+ this.service = STATUS_OK;
+ }
+ },
+
+ get sync() {
+ return this._sync;
+ },
+
+ set sync(code) {
+ this._log.debug("Status.sync: " + this._sync + " => " + code);
+ this._sync = code;
+ this.service = code == SYNC_SUCCEEDED ? STATUS_OK : SYNC_FAILED;
+ },
+
+ get engines() {
+ return this._engines;
+ },
+
+ set engines([name, code]) {
+ this._log.debug("Status for engine " + name + ": " + code);
+ this._engines[name] = code;
+
+ if (code != ENGINE_SUCCEEDED) {
+ this.service = SYNC_FAILED_PARTIAL;
+ }
+ },
+
+ // Implement toString because adding a logger introduces a cyclic object
+ // value, so we can't trivially debug-print Status as JSON.
+ toString: function toString() {
+ return (
+ "<Status" +
+ ": login: " +
+ Status.login +
+ ", service: " +
+ Status.service +
+ ", sync: " +
+ Status.sync +
+ ">"
+ );
+ },
+
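+  // Verify that Sync is minimally configured (we have a username) and
+  // update the status codes accordingly; returns the resulting service
+  // status.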
+ checkSetup: function checkSetup() {
+ if (!this._authManager.username) {
+ Status.login = LOGIN_FAILED_NO_USERNAME;
+ Status.service = CLIENT_NOT_CONFIGURED;
+ } else if (Status.login == STATUS_OK) {
+ Status.service = STATUS_OK;
+ }
+ return Status.service;
+ },
+
+ resetBackoff: function resetBackoff() {
+ this.enforceBackoff = false;
+ this.backoffInterval = 0;
+ this.minimumNextSync = 0;
+ },
+
+ resetSync: function resetSync() {
+ // Logger setup.
+ this._log.manageLevelFromPref("services.sync.log.logger.status");
+
+ this._log.info("Resetting Status.");
+ this.service = STATUS_OK;
+ this._login = LOGIN_SUCCEEDED;
+ this._sync = SYNC_SUCCEEDED;
+ this._engines = {};
+ this.partial = false;
+ },
+};
+
+// Initialize various status values.
+Status.resetBackoff();
+Status.resetSync();
diff --git a/services/sync/modules/sync_auth.sys.mjs b/services/sync/modules/sync_auth.sys.mjs
new file mode 100644
index 0000000000..2f2bd2af77
--- /dev/null
+++ b/services/sync/modules/sync_auth.sys.mjs
@@ -0,0 +1,656 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+import { Async } from "resource://services-common/async.sys.mjs";
+import { TokenServerClient } from "resource://services-common/tokenserverclient.sys.mjs";
+import { CryptoUtils } from "resource://services-crypto/utils.sys.mjs";
+import { Svc, Utils } from "resource://services-sync/util.sys.mjs";
+
+import {
+ LOGIN_FAILED_LOGIN_REJECTED,
+ LOGIN_FAILED_NETWORK_ERROR,
+ LOGIN_FAILED_NO_USERNAME,
+ LOGIN_SUCCEEDED,
+ MASTER_PASSWORD_LOCKED,
+ STATUS_OK,
+} from "resource://services-sync/constants.sys.mjs";
+
+const lazy = {};
+
+// Lazy imports to prevent unnecessary load on startup.
+ChromeUtils.defineESModuleGetters(lazy, {
+ BulkKeyBundle: "resource://services-sync/keys.sys.mjs",
+ Weave: "resource://services-sync/main.sys.mjs",
+});
+
+XPCOMUtils.defineLazyGetter(lazy, "fxAccounts", () => {
+ return ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+ ).getFxAccountsSingleton();
+});
+
+XPCOMUtils.defineLazyGetter(lazy, "log", function () {
+ let log = Log.repository.getLogger("Sync.SyncAuthManager");
+ log.manageLevelFromPref("services.sync.log.logger.identity");
+ return log;
+});
+
+XPCOMUtils.defineLazyPreferenceGetter(
+ lazy,
+ "IGNORE_CACHED_AUTH_CREDENTIALS",
+ "services.sync.debug.ignoreCachedAuthCredentials"
+);
+
+// FxAccountsCommon.js doesn't use a "namespace", so create one here.
+var fxAccountsCommon = ChromeUtils.import(
+ "resource://gre/modules/FxAccountsCommon.js"
+);
+
+const SCOPE_OLD_SYNC = fxAccountsCommon.SCOPE_OLD_SYNC;
+
+const OBSERVER_TOPICS = [
+ fxAccountsCommon.ONLOGIN_NOTIFICATION,
+ fxAccountsCommon.ONVERIFIED_NOTIFICATION,
+ fxAccountsCommon.ONLOGOUT_NOTIFICATION,
+ fxAccountsCommon.ON_ACCOUNT_STATE_CHANGE_NOTIFICATION,
+ "weave:connected",
+];
+
+/*
+ General authentication error for abstracting authentication
+ errors from multiple sources (e.g., from FxAccounts, TokenServer).
+  `details` carries additional information about the error - it might be a
+  string, or some other error object (which should do the right thing when
+  toString() is called on it).
+*/
+export function AuthenticationError(details, source) {
+ this.details = details;
+ this.source = source;
+}
+
+AuthenticationError.prototype = {
+ toString() {
+ return "AuthenticationError(" + this.details + ")";
+ },
+};
+
+// The `SyncAuthManager` coordinates access authorization to the Sync server.
+// Its job is essentially to get us from having a signed-in Firefox Accounts user,
+// to knowing the user's sync storage node and having the necessary short-lived
+// credentials in order to access it.
+//
+
+export function SyncAuthManager() {
+ // NOTE: _fxaService and _tokenServerClient are replaced with mocks by
+ // the test suite.
+ this._fxaService = lazy.fxAccounts;
+ this._tokenServerClient = new TokenServerClient();
+ this._tokenServerClient.observerPrefix = "weave:service";
+ this._log = lazy.log;
+ XPCOMUtils.defineLazyPreferenceGetter(
+ this,
+ "_username",
+ "services.sync.username"
+ );
+
+ this.asyncObserver = Async.asyncObserver(this, lazy.log);
+ for (let topic of OBSERVER_TOPICS) {
+ Services.obs.addObserver(this.asyncObserver, topic);
+ }
+}
+
+SyncAuthManager.prototype = {
+ _fxaService: null,
+ _tokenServerClient: null,
+ // https://docs.services.mozilla.com/token/apis.html
+ _token: null,
+ // protection against the user changing underneath us - the uid
+ // of the current user.
+ _userUid: null,
+
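+  // Return a hashed version of the FxA uid, suitable for telemetry.
+  // Throws if we haven't seen a token yet.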
+ hashedUID() {
+ const id = this._fxaService.telemetry.getSanitizedUID();
+ if (!id) {
+ throw new Error("hashedUID: Don't seem to have previously seen a token");
+ }
+ return id;
+ },
+
+ // Return a hashed version of a deviceID, suitable for telemetry.
+ hashedDeviceID(deviceID) {
+ const id = this._fxaService.telemetry.sanitizeDeviceId(deviceID);
+ if (!id) {
+ throw new Error("hashedUID: Don't seem to have previously seen a token");
+ }
+ return id;
+ },
+
+ // The "node type" reported to telemetry or null if not specified.
+ get telemetryNodeType() {
+ return this._token && this._token.node_type ? this._token.node_type : null;
+ },
+
+ finalize() {
+ // After this is called, we can expect Service.identity != this.
+ for (let topic of OBSERVER_TOPICS) {
+ Services.obs.removeObserver(this.asyncObserver, topic);
+ }
+ this.resetCredentials();
+ this._userUid = null;
+ },
+
+ async getSignedInUser() {
+ let data = await this._fxaService.getSignedInUser();
+ if (!data) {
+ this._userUid = null;
+ return null;
+ }
+ if (this._userUid == null) {
+ this._userUid = data.uid;
+ } else if (this._userUid != data.uid) {
+ throw new Error("The signed in user has changed");
+ }
+ return data;
+ },
+
+ logout() {
+ // This will be called when sync fails (or when the account is being
+ // unlinked etc). It may have failed because we got a 401 from a sync
+ // server, so we nuke the token. Next time sync runs and wants an
+ // authentication header, we will notice the lack of the token and fetch a
+ // new one.
+ this._token = null;
+ },
+
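+  // Async observer for the FxA notifications in OBSERVER_TOPICS; keeps
+  // Sync credentials and scheduling in step with the account state.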
+ async observe(subject, topic, data) {
+ this._log.debug("observed " + topic);
+ if (!this.username) {
+ this._log.info("Sync is not configured, so ignoring the notification");
+ return;
+ }
+ switch (topic) {
+ case "weave:connected":
+ case fxAccountsCommon.ONLOGIN_NOTIFICATION: {
+ this._log.info("Sync has been connected to a logged in user");
+ this.resetCredentials();
+ let accountData = await this.getSignedInUser();
+
+ if (!accountData.verified) {
+ // wait for a verified notification before we kick sync off.
+ this._log.info("The user is not verified");
+ break;
+ }
+ }
+      // Intentional fall-through - we've been configured with an already
+      // verified user, so proceed as if we had just seen the verification.
+ case fxAccountsCommon.ONVERIFIED_NOTIFICATION: {
+ this._log.info("The user became verified");
+ lazy.Weave.Status.login = LOGIN_SUCCEEDED;
+
+ // And actually sync. If we've never synced before, we force a full sync.
+ // If we have, then we are probably just reauthenticating so it's a normal sync.
+ // We can use any pref that must be set if we've synced before, and check
+ // the sync lock state because we might already be doing that first sync.
+ let isFirstSync =
+ !lazy.Weave.Service.locked && !Svc.Prefs.get("client.syncID", null);
+ if (isFirstSync) {
+ this._log.info("Doing initial sync actions");
+ Svc.Prefs.set("firstSync", "resetClient");
+ Services.obs.notifyObservers(null, "weave:service:setup-complete");
+ }
+        // There's no need to wait for the sync to complete, and doing so
+        // would deadlock our AsyncObserver.
+ if (!Svc.Prefs.get("testing.tps", false)) {
+ lazy.Weave.Service.sync({ why: "login" });
+ }
+ break;
+ }
+
+ case fxAccountsCommon.ONLOGOUT_NOTIFICATION:
+ lazy.Weave.Service.startOver()
+ .then(() => {
+ this._log.trace("startOver completed");
+ })
+ .catch(err => {
+ this._log.warn("Failed to reset sync", err);
+ });
+ // startOver will cause this instance to be thrown away, so there's
+ // nothing else to do.
+ break;
+
+ case fxAccountsCommon.ON_ACCOUNT_STATE_CHANGE_NOTIFICATION:
+ // throw away token forcing us to fetch a new one later.
+ this.resetCredentials();
+ break;
+ }
+ },
+
+ /**
+ * Provide override point for testing token expiration.
+ */
+ _now() {
+ return this._fxaService._internal.now();
+ },
+
+ get _localtimeOffsetMsec() {
+ return this._fxaService._internal.localtimeOffsetMsec;
+ },
+
+ get syncKeyBundle() {
+ return this._syncKeyBundle;
+ },
+
+ get username() {
+ return this._username;
+ },
+
+ /**
+ * Set the username value.
+ *
+ * Changing the username has the side-effect of wiping credentials.
+ */
+ set username(value) {
+ // setting .username is an old throwback, but it should no longer happen.
+ throw new Error("don't set the username");
+ },
+
+ /**
+ * Resets all calculated credentials we hold for the current user. This will
+ * *not* force the user to reauthenticate, but instead will force us to
+ * calculate a new key bundle, fetch a new token, etc.
+ */
+ resetCredentials() {
+ this._syncKeyBundle = null;
+ this._token = null;
+ // The cluster URL comes from the token, so resetting it to empty will
+ // force Sync to not accidentally use a value from an earlier token.
+ lazy.Weave.Service.clusterURL = null;
+ },
+
+ /**
+ * Pre-fetches any information that might help with migration away from this
+ * identity. Called after every sync and is really just an optimization that
+ * allows us to avoid a network request for when we actually need the
+ * migration info.
+ */
+ prefetchMigrationSentinel(service) {
+ // nothing to do here until we decide to migrate away from FxA.
+ },
+
+ /**
+ * Verify the current auth state, unlocking the master-password if necessary.
+ *
+ * Returns a promise that resolves with the current auth state after
+ * attempting to unlock.
+ */
+ async unlockAndVerifyAuthState() {
+ let data = await this.getSignedInUser();
+ const fxa = this._fxaService;
+ if (!data) {
+ lazy.log.debug("unlockAndVerifyAuthState has no FxA user");
+ return LOGIN_FAILED_NO_USERNAME;
+ }
+ if (!this.username) {
+ lazy.log.debug(
+ "unlockAndVerifyAuthState finds that sync isn't configured"
+ );
+ return LOGIN_FAILED_NO_USERNAME;
+ }
+ if (!data.verified) {
+ // Treat not verified as if the user needs to re-auth, so the browser
+ // UI reflects the state.
+ lazy.log.debug("unlockAndVerifyAuthState has an unverified user");
+ return LOGIN_FAILED_LOGIN_REJECTED;
+ }
+ if (await fxa.keys.canGetKeyForScope(SCOPE_OLD_SYNC)) {
+ lazy.log.debug(
+ "unlockAndVerifyAuthState already has (or can fetch) sync keys"
+ );
+ return STATUS_OK;
+ }
+ // so no keys - ensure MP unlocked.
+ if (!Utils.ensureMPUnlocked()) {
+      // The user declined to unlock, so we can't tell whether the keys are
+      // stored there.
+ lazy.log.debug(
+ "unlockAndVerifyAuthState: user declined to unlock master-password"
+ );
+ return MASTER_PASSWORD_LOCKED;
+ }
+ // If we still can't get keys it probably means the user authenticated
+ // without unlocking the MP or cleared the saved logins, so we've now
+ // lost them - the user will need to reauth before continuing.
+ let result;
+ if (await fxa.keys.canGetKeyForScope(SCOPE_OLD_SYNC)) {
+ result = STATUS_OK;
+ } else {
+ result = LOGIN_FAILED_LOGIN_REJECTED;
+ }
+ lazy.log.debug(
+ "unlockAndVerifyAuthState re-fetched credentials and is returning",
+ result
+ );
+ return result;
+ },
+
+ /**
+ * Do we have a non-null, not yet expired token for the user currently
+ * signed in?
+ */
+ _hasValidToken() {
+ // If pref is set to ignore cached authentication credentials for debugging,
+ // then return false to force the fetching of a new token.
+ if (lazy.IGNORE_CACHED_AUTH_CREDENTIALS) {
+ return false;
+ }
+ if (!this._token) {
+ return false;
+ }
+ if (this._token.expiration < this._now()) {
+ return false;
+ }
+ return true;
+ },
+
+ // Get our tokenServerURL - a private helper. Returns a string.
+ get _tokenServerUrl() {
+ // We used to support services.sync.tokenServerURI but this was a
+ // pain-point for people using non-default servers as Sync may auto-reset
+ // all services.sync prefs. So if that still exists, it wins.
+ let url = Svc.Prefs.get("tokenServerURI"); // Svc.Prefs "root" is services.sync
+ if (!url) {
+ url = Services.prefs.getCharPref("identity.sync.tokenserver.uri");
+ }
+ while (url.endsWith("/")) {
+ // trailing slashes cause problems...
+ url = url.slice(0, -1);
+ }
+ return url;
+ },
+
+ // Refresh the sync token for our user. Returns a promise that resolves
+ // with a token, or rejects with an error.
+ async _fetchTokenForUser() {
+ const fxa = this._fxaService;
+ // We need keys for things to work. If we don't have them, just
+ // return null for the token - sync calling unlockAndVerifyAuthState()
+ // before actually syncing will setup the error states if necessary.
+ if (!(await fxa.keys.canGetKeyForScope(SCOPE_OLD_SYNC))) {
+ this._log.info(
+ "Unable to fetch keys (master-password locked?), so aborting token fetch"
+ );
+ throw new Error("Can't fetch a token as we can't get keys");
+ }
+
+ // Do the token dance, with a retry in case of transient auth failure.
+ // We need to prove that we know the sync key in order to get a token
+ // from the tokenserver.
+ let getToken = async key => {
+ this._log.info("Getting a sync token from", this._tokenServerUrl);
+ let token = await this._fetchTokenUsingOAuth(key);
+ this._log.trace("Successfully got a token");
+ return token;
+ };
+
+ try {
+ let token, key;
+ try {
+ this._log.info("Getting sync key");
+ key = await fxa.keys.getKeyForScope(SCOPE_OLD_SYNC);
+ if (!key) {
+ throw new Error("browser does not have the sync key, cannot sync");
+ }
+ token = await getToken(key);
+ } catch (err) {
+ // If we get a 401 fetching the token it may be that our auth tokens needed
+ // to be regenerated; retry exactly once.
+ if (!err.response || err.response.status !== 401) {
+ throw err;
+ }
+ this._log.warn(
+ "Token server returned 401, retrying token fetch with fresh credentials"
+ );
+ key = await fxa.keys.getKeyForScope(SCOPE_OLD_SYNC);
+ token = await getToken(key);
+ }
+      // We treat the token as expiring at 80% of its stated duration, so we
+      // refresh it before it actually expires; this avoids sync storage
+      // errors where we may briefly enter a "needs reauthentication" state.
+      // (XXX - the above may no longer be true - someone should check ;)
+ token.expiration = this._now() + token.duration * 1000 * 0.8;
+ if (!this._syncKeyBundle) {
+ this._syncKeyBundle = lazy.BulkKeyBundle.fromJWK(key);
+ }
+ lazy.Weave.Status.login = LOGIN_SUCCEEDED;
+ this._token = token;
+ return token;
+ } catch (caughtErr) {
+ let err = caughtErr; // The error we will rethrow.
+
+ // TODO: unify these errors - we need to handle errors thrown by
+ // both tokenserverclient and hawkclient.
+ // A tokenserver error thrown based on a bad response.
+ if (err.response && err.response.status === 401) {
+ err = new AuthenticationError(err, "tokenserver");
+ // A hawkclient error.
+ } else if (err.code && err.code === 401) {
+ err = new AuthenticationError(err, "hawkclient");
+ // An FxAccounts.jsm error.
+ } else if (err.message == fxAccountsCommon.ERROR_AUTH_ERROR) {
+ err = new AuthenticationError(err, "fxaccounts");
+ }
+
+ // TODO: write tests to make sure that different auth error cases are handled here
+ // properly: auth error getting oauth token, auth error getting sync token (invalid
+ // generation or client-state error)
+ if (err instanceof AuthenticationError) {
+ this._log.error("Authentication error in _fetchTokenForUser", err);
+ // set it to the "fatal" LOGIN_FAILED_LOGIN_REJECTED reason.
+ lazy.Weave.Status.login = LOGIN_FAILED_LOGIN_REJECTED;
+ } else {
+ this._log.error("Non-authentication error in _fetchTokenForUser", err);
+ // for now assume it is just a transient network related problem
+ // (although sadly, it might also be a regular unhandled exception)
+ lazy.Weave.Status.login = LOGIN_FAILED_NETWORK_ERROR;
+ }
+ throw err;
+ }
+ },
+
+ /**
+ * Generates an OAuth access_token using the OLD_SYNC scope and exchanges it
+ * for a TokenServer token.
+ *
+ * @returns {Promise}
+ * @private
+ */
+ async _fetchTokenUsingOAuth(key) {
+ this._log.debug("Getting a token using OAuth");
+ const fxa = this._fxaService;
+ const ttl = fxAccountsCommon.OAUTH_TOKEN_FOR_SYNC_LIFETIME_SECONDS;
+ const accessToken = await fxa.getOAuthToken({ scope: SCOPE_OLD_SYNC, ttl });
+ const headers = {
+ "X-KeyId": key.kid,
+ };
+
+ return this._tokenServerClient
+ .getTokenUsingOAuth(this._tokenServerUrl, accessToken, headers)
+ .catch(async err => {
+ if (err.response && err.response.status === 401) {
+ // remove the cached token if we cannot authorize with it.
+ // we have to do this here because we know which `token` to remove
+ // from cache.
+ await fxa.removeCachedOAuthToken({ token: accessToken });
+ }
+
+ // continue the error chain, so other handlers can deal with the error.
+ throw err;
+ });
+ },
+
+ // Returns a promise that is resolved with a valid token for the current
+ // user, or rejects if one can't be obtained.
+ // NOTE: This does all the authentication for Sync - it both sets the
+ // key bundle (ie, decryption keys) and does the token fetch. These 2
+ // concepts could be decoupled, but there doesn't seem any value in that
+ // currently.
+ async _ensureValidToken(forceNewToken = false) {
+ let signedInUser = await this.getSignedInUser();
+ if (!signedInUser) {
+ throw new Error("no user is logged in");
+ }
+ if (!signedInUser.verified) {
+ throw new Error("user is not verified");
+ }
+
+ await this.asyncObserver.promiseObserversComplete();
+
+ if (!forceNewToken && this._hasValidToken()) {
+ this._log.trace("_ensureValidToken already has one");
+ return this._token;
+ }
+
+ // We are going to grab a new token - re-use the same promise if we are
+ // already fetching one.
+ if (!this._ensureValidTokenPromise) {
+ this._ensureValidTokenPromise = this.__ensureValidToken().finally(() => {
+ this._ensureValidTokenPromise = null;
+ });
+ }
+ return this._ensureValidTokenPromise;
+ },
+
+ async __ensureValidToken() {
+ // reset this._token as a safety net to reduce the possibility of us
+ // repeatedly attempting to use an invalid token if _fetchTokenForUser throws.
+ this._token = null;
+ try {
+ let token = await this._fetchTokenForUser();
+ this._token = token;
+ // This is a little bit of a hack. The tokenserver tells us a HMACed version
+ // of the FxA uid which we can use for metrics purposes without revealing the
+ // user's true uid. It conceptually belongs to FxA but we get it from tokenserver
+ // for legacy reasons. Hand it back to the FxA client code to deal with.
+ this._fxaService.telemetry._setHashedUID(token.hashed_fxa_uid);
+ return token;
+ } finally {
+ Services.obs.notifyObservers(null, "weave:service:login:got-hashed-id");
+ }
+ },
+
+ getResourceAuthenticator() {
+ return this._getAuthenticationHeader.bind(this);
+ },
+
+ /**
+ * @return a Hawk HTTP Authorization Header, lightly wrapped, for the .uri
+ * of a RESTRequest or AsyncResponse object.
+ */
+ async _getAuthenticationHeader(httpObject, method) {
+ // Note that in failure states we return null, causing the request to be
+ // made without authorization headers, thereby presumably causing a 401,
+ // which causes Sync to log out. If we throw, this may not happen as
+ // expected.
+ try {
+ await this._ensureValidToken();
+ } catch (ex) {
+ this._log.error("Failed to fetch a token for authentication", ex);
+ return null;
+ }
+ if (!this._token) {
+ return null;
+ }
+ let credentials = { id: this._token.id, key: this._token.key };
+ method = method || httpObject.method;
+
+ // Get the local clock offset from the Firefox Accounts server. This should
+ // be close to the offset from the storage server.
+ let options = {
+ now: this._now(),
+ localtimeOffsetMsec: this._localtimeOffsetMsec,
+ credentials,
+ };
+
+ let headerValue = await CryptoUtils.computeHAWK(
+ httpObject.uri,
+ method,
+ options
+ );
+ return { headers: { authorization: headerValue.field } };
+ },
+
+ /**
+ * Determine the cluster for the current user and update state.
+ * Returns true if a new cluster URL was found and it is different from
+ * the existing cluster URL, false otherwise.
+ */
+ async setCluster() {
+ // Make sure we didn't get some unexpected response for the cluster.
+ let cluster = await this._findCluster();
+ this._log.debug("Cluster value = " + cluster);
+ if (cluster == null) {
+ return false;
+ }
+
+ // Convert from the funky "String object with additional properties" that
+ // resource.js returns to a plain-old string.
+ cluster = cluster.toString();
+ // Don't update stuff if we already have the right cluster
+ if (cluster == lazy.Weave.Service.clusterURL) {
+ return false;
+ }
+
+ this._log.debug("Setting cluster to " + cluster);
+ lazy.Weave.Service.clusterURL = cluster;
+
+ return true;
+ },
+
+ async _findCluster() {
+ try {
+ // Ensure we are ready to authenticate and have a valid token.
+ // We need to handle node reassignment here. If we are being asked
+ // for a clusterURL while the service already has a clusterURL, then
+ // it's likely a 401 was received using the existing token - in which
+ // case we just discard the existing token and fetch a new one.
+ let forceNewToken = false;
+ if (lazy.Weave.Service.clusterURL) {
+ this._log.debug(
+ "_findCluster has a pre-existing clusterURL, so fetching a new token token"
+ );
+ forceNewToken = true;
+ }
+ let token = await this._ensureValidToken(forceNewToken);
+ let endpoint = token.endpoint;
+ // For Sync 1.5 storage endpoints, we use the base endpoint verbatim.
+ // However, it should end in "/" because we will extend it with
+ // well known path components. So we add a "/" if it's missing.
+ if (!endpoint.endsWith("/")) {
+ endpoint += "/";
+ }
+ this._log.debug("_findCluster returning " + endpoint);
+ return endpoint;
+ } catch (err) {
+ this._log.info("Failed to fetch the cluster URL", err);
+ // service.js's verifyLogin() method will attempt to fetch a cluster
+ // URL when it sees a 401. If it gets null, it treats it as a "real"
+ // auth error and sets Status.login to LOGIN_FAILED_LOGIN_REJECTED, which
+ // in turn causes a notification bar to appear informing the user they
+ // need to re-authenticate.
+ // On the other hand, if fetching the cluster URL fails with an exception,
+ // verifyLogin() assumes it is a transient error, and thus doesn't show
+ // the notification bar under the assumption the issue will resolve
+ // itself.
+ // Thus:
+ // * On a real 401, we must return null.
+ // * On any other problem we must let an exception bubble up.
+ if (err instanceof AuthenticationError) {
+ return null;
+ }
+ throw err;
+ }
+ },
+};
diff --git a/services/sync/modules/telemetry.sys.mjs b/services/sync/modules/telemetry.sys.mjs
new file mode 100644
index 0000000000..9a12be9714
--- /dev/null
+++ b/services/sync/modules/telemetry.sys.mjs
@@ -0,0 +1,1313 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Support for Sync-and-FxA-related telemetry, which is submitted in a special-purpose
+// telemetry ping called the "sync ping", documented here:
+//
+// ../../../toolkit/components/telemetry/docs/data/sync-ping.rst
+//
+// The sync ping contains identifiers that are linked to the user's Firefox Account
+// and are separate from the main telemetry client_id, so this file is also responsible
+// for ensuring that we can delete those pings upon user request, by plumbing its
+// identifiers into the "deletion-request" ping.
+
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+import { Log } from "resource://gre/modules/Log.sys.mjs";
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ Async: "resource://services-common/async.sys.mjs",
+ AuthenticationError: "resource://services-sync/sync_auth.sys.mjs",
+ FxAccounts: "resource://gre/modules/FxAccounts.sys.mjs",
+ Observers: "resource://services-common/observers.sys.mjs",
+ Resource: "resource://services-sync/resource.sys.mjs",
+ Status: "resource://services-sync/status.sys.mjs",
+ Svc: "resource://services-sync/util.sys.mjs",
+ TelemetryController: "resource://gre/modules/TelemetryController.sys.mjs",
+ TelemetryEnvironment: "resource://gre/modules/TelemetryEnvironment.sys.mjs",
+ TelemetryUtils: "resource://gre/modules/TelemetryUtils.sys.mjs",
+ Weave: "resource://services-sync/main.sys.mjs",
+});
+
+XPCOMUtils.defineLazyModuleGetters(lazy, {
+ ObjectUtils: "resource://gre/modules/ObjectUtils.jsm",
+});
+
+XPCOMUtils.defineLazyGetter(lazy, "fxAccounts", () => {
+ return ChromeUtils.importESModule(
+ "resource://gre/modules/FxAccounts.sys.mjs"
+ ).getFxAccountsSingleton();
+});
+
+import * as constants from "resource://services-sync/constants.sys.mjs";
+
+XPCOMUtils.defineLazyGetter(
+ lazy,
+ "WeaveService",
+ () => Cc["@mozilla.org/weave/service;1"].getService().wrappedJSObject
+);
+const log = Log.repository.getLogger("Sync.Telemetry");
+
+const TOPICS = [
+ // For tracking change to account/device identifiers.
+ "fxaccounts:new_device_id",
+ "fxaccounts:onlogout",
+ "weave:service:ready",
+ "weave:service:login:got-hashed-id",
+
+ // For whole-of-sync metrics.
+ "weave:service:sync:start",
+ "weave:service:sync:finish",
+ "weave:service:sync:error",
+
+ // For individual engine metrics.
+ "weave:engine:sync:start",
+ "weave:engine:sync:finish",
+ "weave:engine:sync:error",
+ "weave:engine:sync:applied",
+ "weave:engine:sync:step",
+ "weave:engine:sync:uploaded",
+ "weave:engine:validate:finish",
+ "weave:engine:validate:error",
+
+ // For ad-hoc telemetry events.
+ "weave:telemetry:event",
+ "weave:telemetry:histogram",
+ "fxa:telemetry:event",
+
+ "weave:telemetry:migration",
+];
+
+const PING_FORMAT_VERSION = 1;
+
+const EMPTY_UID = "0".repeat(32);
+
+// The set of engines we record telemetry for - any other engines are ignored.
+const ENGINES = new Set([
+ "addons",
+ "bookmarks",
+ "clients",
+ "forms",
+ "history",
+ "passwords",
+ "prefs",
+ "tabs",
+ "extension-storage",
+ "addresses",
+ "creditcards",
+]);
+
+function tryGetMonotonicTimestamp() {
+ try {
+ return Services.telemetry.msSinceProcessStart();
+ } catch (e) {
+ log.warn("Unable to get a monotonic timestamp!");
+ return -1;
+ }
+}
+
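+// Return the elapsed milliseconds (rounded) since `monotonicStartTime`, or
+// -1 if either timestamp is unavailable.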
+function timeDeltaFrom(monotonicStartTime) {
+ let now = tryGetMonotonicTimestamp();
+ if (monotonicStartTime !== -1 && now !== -1) {
+ return Math.round(now - monotonicStartTime);
+ }
+ return -1;
+}
+
+const NS_ERROR_MODULE_BASE_OFFSET = 0x45;
+const NS_ERROR_MODULE_NETWORK = 6;
+
+// A reimplementation of NS_ERROR_GET_MODULE, which surprisingly doesn't seem
+// to exist anywhere in .js code in a way that can be reused.
+// This is taken from DownloadCore.sys.mjs.
+function NS_ERROR_GET_MODULE(code) {
+ return ((code & 0x7fff0000) >> 16) - NS_ERROR_MODULE_BASE_OFFSET;
+}
+
+// Converts extra integer fields to strings, rounds floats to three
+// decimal places (nanosecond precision for timings), and removes profile
+// directory paths and URLs from potential error messages.
+function normalizeExtraTelemetryFields(extra) {
+ let result = {};
+ for (let key in extra) {
+ let value = extra[key];
+ let type = typeof value;
+ if (type == "string") {
+ result[key] = ErrorSanitizer.cleanErrorMessage(value);
+ } else if (type == "number") {
+ result[key] = Number.isInteger(value)
+ ? value.toString(10)
+ : value.toFixed(3);
+ } else if (type != "undefined") {
+ throw new TypeError(
+ `Invalid type ${type} for extra telemetry field ${key}`
+ );
+ }
+ }
+ return lazy.ObjectUtils.isEmpty(result) ? undefined : result;
+}
+
+// Keeps track of the fates of individual records during a sync cycle.
+// The main reason this is a class is to track individual records' failure
+// reasons without huge memory overhead.
+export class SyncedRecordsTelemetry {
+ // applied => number of items that should be applied.
+ // failed => number of items that failed in this sync.
+ // newFailed => number of items that failed for the first time in this sync.
+ // reconciled => number of items that were reconciled.
+ // failedReasons => {name, count} of reasons a record failed
+ incomingCounts = {
+ applied: 0,
+ failed: 0,
+ newFailed: 0,
+ reconciled: 0,
+ failedReasons: null,
+ };
+ outgoingCounts = { failed: 0, sent: 0, failedReasons: null };
+
+ addIncomingFailedReason(reason) {
+ if (!this.incomingCounts.failedReasons) {
+ this.incomingCounts.failedReasons = [];
+ }
+ let transformedReason = SyncTelemetry.transformError(reason);
+    // Some errors, like http/nss errors, don't have an error object;
+    // those will be caught by the higher-level telemetry.
+ if (!transformedReason.error) {
+ return;
+ }
+
+ let index = this.incomingCounts.failedReasons.findIndex(
+ reasons => reasons.name === transformedReason.error
+ );
+
+ if (index >= 0) {
+ this.incomingCounts.failedReasons[index].count += 1;
+ } else {
+ this.incomingCounts.failedReasons.push({
+ name: transformedReason.error,
+ count: 1,
+ });
+ }
+ }
+
+ addOutgoingFailedReason(reason) {
+ if (!this.outgoingCounts.failedReasons) {
+ this.outgoingCounts.failedReasons = [];
+ }
+ let transformedReason = SyncTelemetry.transformError(reason);
+    // Some errors, like http/nss errors, don't have an error object;
+    // those will be caught by the higher-level telemetry.
+ if (!transformedReason.error) {
+ return;
+ }
+ let index = this.outgoingCounts.failedReasons.findIndex(
+ reasons => reasons.name === transformedReason.error
+ );
+ if (index >= 0) {
+ this.outgoingCounts.failedReasons[index].count += 1;
+ } else {
+ this.outgoingCounts.failedReasons.push({
+ name: transformedReason.error,
+ count: 1,
+ });
+ }
+ }
+}
+
+// The `ErrorSanitizer` has two main jobs (plus one TODO):
+// * Remove PII from errors, such as the profile dir or URLs the user might
+// have visited.
+// * Normalize errors so different locales or operating systems etc don't
+// generate different messages for the same underlying error.
+// * [TODO] Normalize errors so environmental factors don't influence message.
+// For example, timestamps or GUIDs should be replaced with something static.
+export class ErrorSanitizer {
+ // Things we normalize - this isn't exhaustive, but covers the common error messages we see.
+ // Eg:
+ // > Win error 112 during operation write on file [profileDir]\weave\addonsreconciler.json (Espacio en disco insuficiente. )
+ // > Win error 112 during operation write on file [profileDir]\weave\addonsreconciler.json (Diskte yeterli yer yok. )
+ // > <snip many other translations of the error>
+ // > Unix error 28 during operation write on file [profileDir]/weave/addonsreconciler.json (No space left on device)
+ // These tend to crowd out other errors we might care about (eg, top 5 errors for some engines are
+ // variations of the "no space left on device")
+
+ // Note that only errors that have same-but-different errors on Windows and Unix are here - we
+ // still sanitize ones that aren't in these maps to remove the translations etc - eg,
+ // `ERROR_SHARING_VIOLATION` doesn't really have a unix equivalent, so doesn't appear here, but
+ // we still strip the translations to avoid the variants.
+ static E_NO_SPACE_ON_DEVICE = "OS error [No space left on device]";
+ static E_PERMISSION_DENIED = "OS error [Permission denied]";
+ static E_NO_FILE_OR_DIR = "OS error [File/Path not found]";
+ static E_NO_MEM = "OS error [No memory]";
+
+ static WindowsErrorSubstitutions = {
+ 2: this.E_NO_FILE_OR_DIR, // ERROR_FILE_NOT_FOUND
+ 3: this.E_NO_FILE_OR_DIR, // ERROR_PATH_NOT_FOUND
+ 5: this.E_PERMISSION_DENIED, // ERROR_ACCESS_DENIED
+ 8: this.E_NO_MEM, // ERROR_NOT_ENOUGH_MEMORY
+ 112: this.E_NO_SPACE_ON_DEVICE, // ERROR_DISK_FULL
+ };
+
+ static UnixErrorSubstitutions = {
+ 2: this.E_NO_FILE_OR_DIR, // ENOENT
+ 12: this.E_NO_MEM, // ENOMEM
+    13: this.E_PERMISSION_DENIED, // EACCES
+ 28: this.E_NO_SPACE_ON_DEVICE, // ENOSPC
+ };
+
+ static DOMErrorSubstitutions = {
+ NotFoundError: this.E_NO_FILE_OR_DIR,
+ NotAllowedError: this.E_PERMISSION_DENIED,
+ };
+
+ static reWinError =
+ /^(?<head>Win error (?<errno>\d+))(?<detail>.*) \(.*\r?\n?\)$/m;
+ static reUnixError =
+ /^(?<head>Unix error (?<errno>\d+))(?<detail>.*) \(.*\)$/;
+
+ static #cleanOSErrorMessage(message, error = undefined) {
+ if (DOMException.isInstance(error)) {
+ const sub = this.DOMErrorSubstitutions[error.name];
+ message = message.replaceAll("\\", "/");
+ if (sub) {
+ return `${sub} ${message}`;
+ }
+ }
+
+ let match = this.reWinError.exec(message);
+ if (match) {
+ let head =
+ this.WindowsErrorSubstitutions[match.groups.errno] || match.groups.head;
+ return head + match.groups.detail.replaceAll("\\", "/");
+ }
+ match = this.reUnixError.exec(message);
+ if (match) {
+ let head =
+ this.UnixErrorSubstitutions[match.groups.errno] || match.groups.head;
+ return head + match.groups.detail;
+ }
+ return message;
+ }
+
+ // A regex we can use to replace the profile dir in error messages. We use a
+  // regexp so we can simply replace all case-insensitive occurrences.
+ // This escaping function is from:
+ // https://developer.mozilla.org/en/docs/Web/JavaScript/Guide/Regular_Expressions
+ static reProfileDir = new RegExp(
+ PathUtils.profileDir.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"),
+ "gi"
+ );
+
+ /**
+ * Clean an error message, removing PII and normalizing OS-specific messages.
+ *
+ * @param {string} message The error message
+ * @param {Error?} error The error class instance, if any.
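+   *
+   * e.g. (illustrative, using a made-up message in the shape shown above):
+   *   cleanErrorMessage("Win error 112 during operation write on file " +
+   *     "[profileDir]\\weave\\a.json (Espacio en disco insuficiente. )")
+   * returns
+   *   "OS error [No space left on device] during operation write on file [profileDir]/weave/a.json"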
+ */
+ static cleanErrorMessage(message, error = undefined) {
+ // There's a chance the profiledir is in the error string which is PII we
+ // want to avoid including in the ping.
+ message = message.replace(this.reProfileDir, "[profileDir]");
+ // MSG_INVALID_URL from /dom/bindings/Errors.msg -- no way to access this
+ // directly from JS.
+ if (message.endsWith("is not a valid URL.")) {
+ message = "<URL> is not a valid URL.";
+ }
+ // Try to filter things that look somewhat like a URL (in that they contain a
+ // colon in the middle of non-whitespace), in case anything else is including
+ // these in error messages. Note that JSON.stringified stuff comes through
+ // here, so we explicitly ignore double-quotes as well.
+ message = message.replace(/[^\s"]+:[^\s"]+/g, "<URL>");
+
+ // Anywhere that's normalized the guid in errors we can easily filter
+ // to make it easier to aggregate these types of errors
+ message = message.replace(/<guid: ([^>]+)>/g, "<GUID>");
+
+ return this.#cleanOSErrorMessage(message, error);
+ }
+}
+
+// This function validates the payload of a telemetry "event" - this can be
+// removed once there are APIs available for the telemetry modules to collect
+// these events (bug 1329530) - but for now we simulate that planned API as
+// best we can.
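+//
+// For illustration, an event like the following (sample names and values)
+// passes validation:
+//   { object: "processcommand", method: "displayURI", value: undefined,
+//     extra: { flowID: "<guid>" } }
+// while non-string fields, over-length strings, or more than 10 extra keys
+// are rejected.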
+function validateTelemetryEvent(eventDetails) {
+ let { object, method, value, extra } = eventDetails;
+  // Do basic validation of the params - everything except "extra" must
+ // be a string. method and object are required.
+ if (
+ typeof method != "string" ||
+ typeof object != "string" ||
+ (value && typeof value != "string") ||
+ (extra && typeof extra != "object")
+ ) {
+ log.warn("Invalid event parameters - wrong types", eventDetails);
+ return false;
+ }
+ // length checks.
+ if (
+ method.length > 20 ||
+ object.length > 20 ||
+ (value && value.length > 80)
+ ) {
+ log.warn("Invalid event parameters - wrong lengths", eventDetails);
+ return false;
+ }
+
+ // extra can be falsey, or an object with string names and values.
+ if (extra) {
+ if (Object.keys(extra).length > 10) {
+ log.warn("Invalid event parameters - too many extra keys", eventDetails);
+ return false;
+ }
+ for (let [ename, evalue] of Object.entries(extra)) {
+ if (
+ typeof ename != "string" ||
+ ename.length > 15 ||
+ typeof evalue != "string" ||
+ evalue.length > 85
+ ) {
+ log.warn(
+          `Invalid event parameters: extra item "${ename}" is invalid`,
+ eventDetails
+ );
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+class EngineRecord {
+ constructor(name) {
+ // startTime is in ms from process start, but is monotonic (unlike Date.now())
+ // so we need to keep both it and when.
+ this.startTime = tryGetMonotonicTimestamp();
+ this.name = name;
+
+ // This allows cases like bookmarks-buffered to have a separate name from
+ // the bookmarks engine.
+ let engineImpl = lazy.Weave.Service.engineManager.get(name);
+ if (engineImpl && engineImpl.overrideTelemetryName) {
+ this.overrideTelemetryName = engineImpl.overrideTelemetryName;
+ }
+ }
+
+ toJSON() {
+ let result = { name: this.overrideTelemetryName || this.name };
+ let properties = [
+ "took",
+ "status",
+ "failureReason",
+ "incoming",
+ "outgoing",
+ "validation",
+ "steps",
+ ];
+ for (let property of properties) {
+ result[property] = this[property];
+ }
+ return result;
+ }
+
+ finished(error) {
+ let took = timeDeltaFrom(this.startTime);
+ if (took > 0) {
+ this.took = took;
+ }
+ if (error) {
+ this.failureReason = SyncTelemetry.transformError(error);
+ }
+ }
+
+ recordApplied(counts) {
+ if (this.incoming) {
+ log.error(
+ `Incoming records applied multiple times for engine ${this.name}!`
+ );
+ return;
+ }
+ if (this.name === "clients" && !counts.failed) {
+ // ignore successful application of client records
+ // since otherwise they show up every time and are meaningless.
+ return;
+ }
+
+ let incomingData = {};
+
+ if (counts.failedReasons) {
+ // sort the failed reasons in desc by count, then take top 10
+ counts.failedReasons = counts.failedReasons
+ .sort((a, b) => b.count - a.count)
+ .slice(0, 10);
+ }
+ // Counts has extra stuff used for logging, but we only care about a few
+ let properties = ["applied", "failed", "failedReasons"];
+ // Only record non-zero properties and only record incoming at all if
+ // there's at least one property we care about.
+ for (let property of properties) {
+ if (counts[property]) {
+ incomingData[property] = counts[property];
+ this.incoming = incomingData;
+ }
+ }
+ }
+
+ recordStep(stepResult) {
+ let step = {
+ name: stepResult.name,
+ };
+ if (stepResult.took > 0) {
+ step.took = Math.round(stepResult.took);
+ }
+ if (stepResult.counts) {
+ let counts = stepResult.counts.filter(({ count }) => count > 0);
+ if (counts.length) {
+ step.counts = counts;
+ }
+ }
+ if (this.steps) {
+ this.steps.push(step);
+ } else {
+ this.steps = [step];
+ }
+ }
+
+ recordValidation(validationResult) {
+ if (this.validation) {
+ log.error(`Multiple validations occurred for engine ${this.name}!`);
+ return;
+ }
+ let { problems, version, took, checked } = validationResult;
+ let validation = {
+ version: version || 0,
+ checked: checked || 0,
+ };
+ if (took > 0) {
+ validation.took = Math.round(took);
+ }
+ let summarized = problems.filter(({ count }) => count > 0);
+ if (summarized.length) {
+ validation.problems = summarized;
+ }
+ this.validation = validation;
+ }
+
+ recordValidationError(e) {
+ if (this.validation) {
+ log.error(`Multiple validations occurred for engine ${this.name}!`);
+ return;
+ }
+
+ this.validation = {
+ failureReason: SyncTelemetry.transformError(e),
+ };
+ }
+
+ recordUploaded(counts) {
+ if (counts.sent || counts.failed) {
+ if (!this.outgoing) {
+ this.outgoing = [];
+ }
+ if (counts.failedReasons) {
+ // sort the failed reasons in desc by count, then take top 10
+ counts.failedReasons = counts.failedReasons
+ .sort((a, b) => b.count - a.count)
+ .slice(0, 10);
+ }
+ this.outgoing.push({
+ sent: counts.sent || undefined,
+ failed: counts.failed || undefined,
+ failedReasons: counts.failedReasons || undefined,
+ });
+ }
+ }
+}
+
+// The record of a single "sync" - typically many of these are submitted in
+// a single ping (ie, as a 'syncs' array)
+export class SyncRecord {
+ constructor(allowedEngines, why) {
+ this.allowedEngines = allowedEngines;
+ // Our failure reason. This property only exists in the generated ping if an
+ // error actually occurred.
+ this.failureReason = undefined;
+ this.syncNodeType = null;
+ this.when = Date.now();
+ this.startTime = tryGetMonotonicTimestamp();
+ this.took = 0; // will be set later.
+ this.why = why;
+
+ // All engines that have finished (ie, does not include the "current" one)
+ // We omit this from the ping if it's empty.
+ this.engines = [];
+ // The engine that has started but not yet stopped.
+ this.currentEngine = null;
+ }
+
+ toJSON() {
+ let result = {
+ when: this.when,
+ took: this.took,
+ failureReason: this.failureReason,
+ status: this.status,
+ };
+ if (this.why) {
+ result.why = this.why;
+ }
+ let engines = [];
+ for (let engine of this.engines) {
+ engines.push(engine.toJSON());
+ }
+ if (engines.length) {
+ result.engines = engines;
+ }
+ return result;
+ }
+
+ finished(error) {
+ this.took = timeDeltaFrom(this.startTime);
+ if (this.currentEngine != null) {
+ log.error(
+ "Finished called for the sync before the current engine finished"
+ );
+ this.currentEngine.finished(null);
+ this.onEngineStop(this.currentEngine.name);
+ }
+ if (error) {
+ this.failureReason = SyncTelemetry.transformError(error);
+ }
+
+ this.syncNodeType = lazy.Weave.Service.identity.telemetryNodeType;
+
+    // Check for engine statuses. We do this now, and not in engine.finished,
+    // to make sure any statuses that get set "late" are recorded.
+ for (let engine of this.engines) {
+ let status = lazy.Status.engines[engine.name];
+ if (status && status !== constants.ENGINE_SUCCEEDED) {
+ engine.status = status;
+ }
+ }
+
+ let statusObject = {};
+
+ let serviceStatus = lazy.Status.service;
+ if (serviceStatus && serviceStatus !== constants.STATUS_OK) {
+ statusObject.service = serviceStatus;
+ this.status = statusObject;
+ }
+ let syncStatus = lazy.Status.sync;
+ if (syncStatus && syncStatus !== constants.SYNC_SUCCEEDED) {
+ statusObject.sync = syncStatus;
+ this.status = statusObject;
+ }
+ }
+
+ onEngineStart(engineName) {
+ if (this._shouldIgnoreEngine(engineName, false)) {
+ return;
+ }
+
+ if (this.currentEngine) {
+ log.error(
+ `Being told that engine ${engineName} has started, but current engine ${this.currentEngine.name} hasn't stopped`
+ );
+ // Just discard the current engine rather than making up data for it.
+ }
+ this.currentEngine = new EngineRecord(engineName);
+ }
+
+ onEngineStop(engineName, error) {
+ // We only care if it's the current engine if we have a current engine.
+ if (this._shouldIgnoreEngine(engineName, !!this.currentEngine)) {
+ return;
+ }
+ if (!this.currentEngine) {
+ // It's possible for us to get an error before the start message of an engine
+ // (somehow), in which case we still want to record that error.
+ if (!error) {
+ return;
+ }
+ log.error(
+ `Error triggered on ${engineName} when no current engine exists: ${error}`
+ );
+ this.currentEngine = new EngineRecord(engineName);
+ }
+ this.currentEngine.finished(error);
+ this.engines.push(this.currentEngine);
+ this.currentEngine = null;
+ }
+
+ onEngineApplied(engineName, counts) {
+ if (this._shouldIgnoreEngine(engineName)) {
+ return;
+ }
+ this.currentEngine.recordApplied(counts);
+ }
+
+ onEngineStep(engineName, step) {
+ if (this._shouldIgnoreEngine(engineName)) {
+ return;
+ }
+ this.currentEngine.recordStep(step);
+ }
+
+ onEngineValidated(engineName, validationData) {
+ if (this._shouldIgnoreEngine(engineName, false)) {
+ return;
+ }
+ let engine = this.engines.find(e => e.name === engineName);
+ if (
+ !engine &&
+ this.currentEngine &&
+ engineName === this.currentEngine.name
+ ) {
+ engine = this.currentEngine;
+ }
+ if (engine) {
+ engine.recordValidation(validationData);
+ } else {
+ log.warn(
+ `Validation event triggered for engine ${engineName}, which hasn't been synced!`
+ );
+ }
+ }
+
+ onEngineValidateError(engineName, error) {
+ if (this._shouldIgnoreEngine(engineName, false)) {
+ return;
+ }
+ let engine = this.engines.find(e => e.name === engineName);
+ if (
+ !engine &&
+ this.currentEngine &&
+ engineName === this.currentEngine.name
+ ) {
+ engine = this.currentEngine;
+ }
+ if (engine) {
+ engine.recordValidationError(error);
+ } else {
+ log.warn(
+ `Validation failure event triggered for engine ${engineName}, which hasn't been synced!`
+ );
+ }
+ }
+
+ onEngineUploaded(engineName, counts) {
+ if (this._shouldIgnoreEngine(engineName)) {
+ return;
+ }
+ this.currentEngine.recordUploaded(counts);
+ }
+
+ _shouldIgnoreEngine(engineName, shouldBeCurrent = true) {
+ if (!this.allowedEngines.has(engineName)) {
+ log.info(
+ `Notification for engine ${engineName}, but we aren't recording telemetry for it`
+ );
+ return true;
+ }
+ if (shouldBeCurrent) {
+ if (!this.currentEngine || engineName != this.currentEngine.name) {
+ log.info(`Notification for engine ${engineName} but it isn't current`);
+ return true;
+ }
+ }
+ return false;
+ }
+}
+
+// The entire "sync ping" - it includes all the syncs, events etc recorded in
+// the ping.
+class SyncTelemetryImpl {
+ constructor(allowedEngines) {
+ log.manageLevelFromPref("services.sync.log.logger.telemetry");
+ // This is accessible so we can enable custom engines during tests.
+ this.allowedEngines = allowedEngines;
+ this.current = null;
+ this.setupObservers();
+
+ this.payloads = [];
+ this.discarded = 0;
+ this.events = [];
+ this.histograms = {};
+ this.migrations = [];
+ this.maxEventsCount = lazy.Svc.Prefs.get("telemetry.maxEventsCount", 1000);
+ this.maxPayloadCount = lazy.Svc.Prefs.get("telemetry.maxPayloadCount");
+ this.submissionInterval =
+ lazy.Svc.Prefs.get("telemetry.submissionInterval") * 1000;
+ this.lastSubmissionTime = Services.telemetry.msSinceProcessStart();
+ this.lastUID = EMPTY_UID;
+ this.lastSyncNodeType = null;
+ this.currentSyncNodeType = null;
+ // Note that the sessionStartDate is somewhat arbitrary - the telemetry
+ // modules themselves just use `new Date()`. This means that our startDate
+ // isn't going to be the same as the sessionStartDate in the main pings,
+ // but that's OK for now - if it's a problem we'd need to change the
+ // telemetry modules to expose what it thinks the sessionStartDate is.
+ let sessionStartDate = new Date();
+ this.sessionStartDate = lazy.TelemetryUtils.toLocalTimeISOString(
+ lazy.TelemetryUtils.truncateToHours(sessionStartDate)
+ );
+ lazy.TelemetryController.registerSyncPingShutdown(() => this.shutdown());
+ }
+
+ sanitizeFxaDeviceId(deviceId) {
+ return lazy.fxAccounts.telemetry.sanitizeDeviceId(deviceId);
+ }
+
+ prepareFxaDevices(devices) {
+ // For non-sync users, the data per device is limited -- just an id and a
+ // type (and not even the id yet). For sync users, if we can correctly map
+ // the fxaDevice to a sync device, then we can get os and version info,
+ // which would be quite unfortunate to lose.
+ let extraInfoMap = new Map();
+ if (this.syncIsEnabled()) {
+ for (let client of this.getClientsEngineRecords()) {
+ if (client.fxaDeviceId) {
+ extraInfoMap.set(client.fxaDeviceId, {
+ os: client.os,
+ version: client.version,
+ syncID: this.sanitizeFxaDeviceId(client.id),
+ });
+ }
+ }
+ }
+ // Finally, sanitize and convert to the proper format.
+ return devices.map(d => {
+ let { os, version, syncID } = extraInfoMap.get(d.id) || {
+ os: undefined,
+ version: undefined,
+ syncID: undefined,
+ };
+ return {
+ id: this.sanitizeFxaDeviceId(d.id) || EMPTY_UID,
+ type: d.type,
+ os,
+ version,
+ syncID,
+ };
+ });
+ }
+
+ syncIsEnabled() {
+ return lazy.WeaveService.enabled && lazy.WeaveService.ready;
+ }
+
+ // Separate for testing.
+ getClientsEngineRecords() {
+ if (!this.syncIsEnabled()) {
+ throw new Error("Bug: syncIsEnabled() must be true, check it first");
+ }
+ return lazy.Weave.Service.clientsEngine.remoteClients;
+ }
+
+ updateFxaDevices(devices) {
+ if (!devices) {
+ return {};
+ }
+ let me = devices.find(d => d.isCurrentDevice);
+ let id = me ? this.sanitizeFxaDeviceId(me.id) : undefined;
+ let cleanDevices = this.prepareFxaDevices(devices);
+ return { deviceID: id, devices: cleanDevices };
+ }
+
+ getFxaDevices() {
+ return lazy.fxAccounts.device.recentDeviceList;
+ }
+
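+  // For reference, the ping body built below looks roughly like this (sample
+  // values; optional fields are omitted entirely when empty):
+  //   { os: {...}, why: "schedule", version: PING_FORMAT_VERSION,
+  //     uid: "...", syncNodeType: "...", deviceID: "...",
+  //     sessionStartDate: "...", syncs: [...], devices: [...] }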
+ getPingJSON(reason) {
+ let { devices, deviceID } = this.updateFxaDevices(this.getFxaDevices());
+ return {
+ os: lazy.TelemetryEnvironment.currentEnvironment.system.os,
+ why: reason,
+ devices,
+ discarded: this.discarded || undefined,
+ version: PING_FORMAT_VERSION,
+ syncs: this.payloads.slice(),
+ uid: this.lastUID,
+ syncNodeType: this.lastSyncNodeType || undefined,
+ deviceID,
+ sessionStartDate: this.sessionStartDate,
+ events: !this.events.length ? undefined : this.events,
+ migrations: !this.migrations.length ? undefined : this.migrations,
+ histograms: !Object.keys(this.histograms).length
+ ? undefined
+ : this.histograms,
+ };
+ }
+
+ _addMigrationRecord(type, info) {
+ log.debug("Saw telemetry migration info", type, info);
+ // Updates to this need to be documented in `sync-ping.rst`
+ switch (type) {
+ case "webext-storage":
+ this.migrations.push({
+ type: "webext-storage",
+ entries: +info.entries,
+ entriesSuccessful: +info.entries_successful,
+ extensions: +info.extensions,
+ extensionsSuccessful: +info.extensions_successful,
+ openFailure: !!info.open_failure,
+ });
+ break;
+ default:
+ throw new Error("Bug: Unknown migration record type " + type);
+ }
+ }
+
+ finish(reason) {
+ // Note that we might be in the middle of a sync right now, and so we don't
+ // want to touch this.current.
+ let result = this.getPingJSON(reason);
+ this.payloads = [];
+ this.discarded = 0;
+ this.events = [];
+ this.migrations = [];
+ this.histograms = {};
+ this.submit(result);
+ }
+
+ setupObservers() {
+ for (let topic of TOPICS) {
+ lazy.Observers.add(topic, this, this);
+ }
+ }
+
+ shutdown() {
+ this.finish("shutdown");
+ for (let topic of TOPICS) {
+ lazy.Observers.remove(topic, this, this);
+ }
+ }
+
+ submit(record) {
+ if (!this.isProductionSyncUser()) {
+ return false;
+ }
+ // We still call submit() with possibly illegal payloads so that tests can
+ // know that the ping was built. We don't end up submitting them, however.
+ let numEvents = record.events ? record.events.length : 0;
+ let numMigrations = record.migrations ? record.migrations.length : 0;
+ if (record.syncs.length || numEvents || numMigrations) {
+ log.trace(
+ `submitting ${record.syncs.length} sync record(s) and ` +
+ `${numEvents} event(s) to telemetry`
+ );
+ lazy.TelemetryController.submitExternalPing("sync", record, {
+ usePingSender: true,
+ }).catch(err => {
+ log.error("failed to submit ping", err);
+ });
+ return true;
+ }
+ return false;
+ }
+
+ isProductionSyncUser() {
+ // If FxA isn't production then we treat sync as not being production.
+ // Further, there's the deprecated "services.sync.tokenServerURI" pref we
+ // need to consider - fxa doesn't consider that as if that's the only
+ // pref set, they *are* running a production fxa, just not production sync.
+ if (
+ !lazy.FxAccounts.config.isProductionConfig() ||
+ Services.prefs.prefHasUserValue("services.sync.tokenServerURI")
+ ) {
+ log.trace(`Not sending telemetry ping for self-hosted Sync user`);
+ return false;
+ }
+ return true;
+ }
+
+ onSyncStarted(data) {
+ const why = data && JSON.parse(data).why;
+ if (this.current) {
+ log.warn(
+ "Observed weave:service:sync:start, but we're already recording a sync!"
+ );
+ // Just discard the old record, consistent with our handling of engines, above.
+ this.current = null;
+ }
+ this.current = new SyncRecord(this.allowedEngines, why);
+ }
+
+ // We need to ensure that the telemetry `deletion-request` ping always contains the user's
+ // current sync device ID, because if the user opts out of telemetry then the deletion ping
+ // will be immediately triggered for sending, and we won't have a chance to fill it in later.
+  // This keeps the `deletion-request` ping up-to-date when the user's account state changes.
+ onAccountInitOrChange() {
+ // We don't submit sync pings for self-hosters, so don't need to collect their device ids either.
+ if (!this.isProductionSyncUser()) {
+ return;
+ }
+ // Awkwardly async, but no need to await. If the user's account state changes while
+ // this promise is in flight, it will reject and we won't record any data in the ping.
+ // (And a new notification will trigger us to try again with the new state).
+ lazy.fxAccounts.device
+ .getLocalId()
+ .then(deviceId => {
+ let sanitizedDeviceId =
+ lazy.fxAccounts.telemetry.sanitizeDeviceId(deviceId);
+ // In the past we did not persist the FxA metrics identifiers to disk,
+ // so this might be missing until we can fetch it from the server for the
+        // first time. There will be a fresh notification triggered when it's available.
+ if (sanitizedDeviceId) {
+ // Sanitized device ids are 64 characters long, but telemetry limits scalar strings to 50.
+ // The first 32 chars are sufficient to uniquely identify the device, so just send those.
+          // It's hard to change the sync ping itself to only send 32 chars, for b/w compat reasons.
+ sanitizedDeviceId = sanitizedDeviceId.substr(0, 32);
+ Services.telemetry.scalarSet(
+ "deletion.request.sync_device_id",
+ sanitizedDeviceId
+ );
+ }
+ })
+ .catch(err => {
+ log.warn(
+ `Failed to set sync identifiers in the deletion-request ping: ${err}`
+ );
+ });
+ }
+
+ // This keeps the `deletion-request` ping up-to-date when the user signs out,
+ // clearing the now-nonexistent sync device id.
+ onAccountLogout() {
+ Services.telemetry.scalarSet("deletion.request.sync_device_id", "");
+ }
+
+ _checkCurrent(topic) {
+ if (!this.current) {
+ // This is only `info` because it happens when we do a tabs "quick-write"
+ log.info(
+ `Observed notification ${topic} but no current sync is being recorded.`
+ );
+ return false;
+ }
+ return true;
+ }
+
+ _shouldSubmitForDataChange() {
+ let newID = lazy.fxAccounts.telemetry.getSanitizedUID() || EMPTY_UID;
+ let oldID = this.lastUID;
+ if (
+ newID != EMPTY_UID &&
+ oldID != EMPTY_UID &&
+ // Both are "real" uids, so we care if they've changed.
+ newID != oldID
+ ) {
+ log.trace(
+ `shouldSubmitForDataChange - uid from '${oldID}' -> '${newID}'`
+ );
+ return true;
+ }
+ // We've gone from knowing one of the ids to not knowing it (which we
+ // ignore) or we've gone from not knowing it to knowing it (which is fine),
+ // Now check the node type because a change there also means we should
+ // submit.
+ if (
+ this.lastSyncNodeType &&
+ this.currentSyncNodeType != this.lastSyncNodeType
+ ) {
+ log.trace(
+ `shouldSubmitForDataChange - nodeType from '${this.lastSyncNodeType}' -> '${this.currentSyncNodeType}'`
+ );
+ return true;
+ }
+ log.trace("shouldSubmitForDataChange - no need to submit");
+ return false;
+ }
+
+ maybeSubmitForDataChange() {
+ if (this._shouldSubmitForDataChange()) {
+ log.info(
+ "Early submission of sync telemetry due to changed IDs/NodeType"
+ );
+ this.finish("idchange"); // this actually submits.
+ this.lastSubmissionTime = Services.telemetry.msSinceProcessStart();
+ }
+
+ // Only update the last UIDs if we actually know them.
+ let current_uid = lazy.fxAccounts.telemetry.getSanitizedUID();
+ if (current_uid) {
+ this.lastUID = current_uid;
+ }
+ if (this.currentSyncNodeType) {
+ this.lastSyncNodeType = this.currentSyncNodeType;
+ }
+ }
+
+ maybeSubmitForInterval() {
+ // We want to submit the ping every `this.submissionInterval` but only when
+ // there's no current sync in progress, otherwise we may end up submitting
+ // the sync and the events caused by it in different pings.
+ if (
+ this.current == null &&
+ Services.telemetry.msSinceProcessStart() - this.lastSubmissionTime >
+ this.submissionInterval
+ ) {
+ this.finish("schedule");
+ this.lastSubmissionTime = Services.telemetry.msSinceProcessStart();
+ }
+ }
+
+ onSyncFinished(error) {
+ if (!this.current) {
+ log.warn("onSyncFinished but we aren't recording");
+ return;
+ }
+ this.current.finished(error);
+ this.currentSyncNodeType = this.current.syncNodeType;
+ let current = this.current;
+ this.current = null;
+ this.takeTelemetryRecord(current);
+ }
+
+ takeTelemetryRecord(record) {
+ // We check for "data change" before appending the current sync to payloads,
+    // as it is the current sync which carries the new data, and thus
+ // must go in the *next* submission.
+ this.maybeSubmitForDataChange();
+ if (this.payloads.length < this.maxPayloadCount) {
+ this.payloads.push(record.toJSON());
+ } else {
+ ++this.discarded;
+ }
+ // If we are submitting due to timing, it's desirable that the most recent
+ // sync is included, so we check after appending the record.
+ this.maybeSubmitForInterval();
+ }
+
+ _addHistogram(hist) {
+ let histogram = Services.telemetry.getHistogramById(hist);
+ let s = histogram.snapshot();
+ this.histograms[hist] = s;
+ }
+
+ _recordEvent(eventDetails) {
+ this.maybeSubmitForDataChange();
+
+ if (this.events.length >= this.maxEventsCount) {
+ log.warn("discarding event - already queued our maximum", eventDetails);
+ return;
+ }
+
+ let { object, method, value, extra } = eventDetails;
+ if (extra) {
+ extra = normalizeExtraTelemetryFields(extra);
+ eventDetails = { object, method, value, extra };
+ }
+
+ if (!validateTelemetryEvent(eventDetails)) {
+ // we've already logged what the problem is...
+ return;
+ }
+ log.debug("recording event", eventDetails);
+
+ if (extra && lazy.Resource.serverTime && !extra.serverTime) {
+ extra.serverTime = String(lazy.Resource.serverTime);
+ }
+ let category = "sync";
+ let ts = Math.floor(tryGetMonotonicTimestamp());
+
+ // An event record is a simple array with at least 4 items.
+ let event = [ts, category, method, object];
+ // It may have up to 6 elements if |extra| is defined
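+    // e.g. (sample values) a full 6-element record looks like:
+    //   [1234.5, "sync", "displayURI", "sendcommand", null, { flowID: "..." }]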
+ if (value) {
+ event.push(value);
+ if (extra) {
+ event.push(extra);
+ }
+ } else if (extra) {
+ event.push(null); // a null for the empty value.
+ event.push(extra);
+ }
+ this.events.push(event);
+ this.maybeSubmitForInterval();
+ }
+
+ observe(subject, topic, data) {
+ log.trace(`observed ${topic} ${data}`);
+
+ switch (topic) {
+ case "weave:service:ready":
+ case "weave:service:login:got-hashed-id":
+ case "fxaccounts:new_device_id":
+ this.onAccountInitOrChange();
+ break;
+
+ case "fxaccounts:onlogout":
+ this.onAccountLogout();
+ break;
+
+ /* sync itself state changes */
+ case "weave:service:sync:start":
+ this.onSyncStarted(data);
+ break;
+
+ case "weave:service:sync:finish":
+ if (this._checkCurrent(topic)) {
+ this.onSyncFinished(null);
+ }
+ break;
+
+ case "weave:service:sync:error":
+ // argument needs to be truthy (this should always be the case)
+ this.onSyncFinished(subject || "Unknown");
+ break;
+
+ /* engine sync state changes */
+ case "weave:engine:sync:start":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineStart(data);
+ }
+ break;
+ case "weave:engine:sync:finish":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineStop(data, null);
+ }
+ break;
+
+ case "weave:engine:sync:error":
+ if (this._checkCurrent(topic)) {
+ // argument needs to be truthy (this should always be the case)
+ this.current.onEngineStop(data, subject || "Unknown");
+ }
+ break;
+
+ /* engine counts */
+ case "weave:engine:sync:applied":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineApplied(data, subject);
+ }
+ break;
+
+ case "weave:engine:sync:step":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineStep(data, subject);
+ }
+ break;
+
+ case "weave:engine:sync:uploaded":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineUploaded(data, subject);
+ }
+ break;
+
+ case "weave:engine:validate:finish":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineValidated(data, subject);
+ }
+ break;
+
+ case "weave:engine:validate:error":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineValidateError(data, subject || "Unknown");
+ }
+ break;
+
+ case "weave:telemetry:event":
+ case "fxa:telemetry:event":
+ this._recordEvent(subject);
+ break;
+
+ case "weave:telemetry:histogram":
+ this._addHistogram(data);
+ break;
+
+ case "weave:telemetry:migration":
+ this._addMigrationRecord(data, subject);
+ break;
+
+ default:
+ log.warn(`unexpected observer topic ${topic}`);
+ break;
+ }
+ }
+
+ // Transform an exception into a standard description. Exposed here for when
+ // this module isn't directly responsible for knowing the transform should
+ // happen (for example, when including an error in the |extra| field of
+ // event telemetry)
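+  //
+  // For example (illustrative): a string "error.sync.failed" maps to
+  // { name: "othererror", error: "error.sync.failed" }; an object with a
+  // numeric `status` of 404 maps to { name: "httperror", code: 404 }; any
+  // other string maps to { name: "unexpectederror", error: <sanitized> }.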
+ transformError(error) {
+ // Certain parts of sync will use this pattern as a way to communicate to
+ // processIncoming to abort the processing. However, there's no guarantee
+ // this can only happen then.
+ if (typeof error == "object" && error.code && error.cause) {
+ error = error.cause;
+ }
+ if (lazy.Async.isShutdownException(error)) {
+ return { name: "shutdownerror" };
+ }
+
+ if (typeof error === "string") {
+ if (error.startsWith("error.")) {
+ // This is hacky, but I can't imagine that it's not also accurate.
+ return { name: "othererror", error };
+ }
+ error = ErrorSanitizer.cleanErrorMessage(error);
+ return { name: "unexpectederror", error };
+ }
+
+ if (error instanceof lazy.AuthenticationError) {
+ return { name: "autherror", from: error.source };
+ }
+
+ if (DOMException.isInstance(error)) {
+ return {
+ name: "unexpectederror",
+ error: ErrorSanitizer.cleanErrorMessage(error.message, error),
+ };
+ }
+
+ let httpCode =
+ error.status || (error.response && error.response.status) || error.code;
+
+ if (httpCode) {
+ return { name: "httperror", code: httpCode };
+ }
+
+ if (error.failureCode) {
+ return { name: "othererror", error: error.failureCode };
+ }
+
+ if (error.result) {
+ // many "nsresult" errors are actually network errors - if they are
+ // associated with the "network" module we assume that's true.
+ // We also assume NS_ERROR_ABORT is such an error - for almost everything
+      // we care about, it actually is (eg, if the connection fails early enough
+ // or if we have a captive portal etc) - we don't lose anything by this
+ // assumption, it's just that the error will no longer be in the "nserror"
+ // category, so our analysis can still find them.
+ if (
+ error.result == Cr.NS_ERROR_ABORT ||
+ NS_ERROR_GET_MODULE(error.result) == NS_ERROR_MODULE_NETWORK
+ ) {
+ return { name: "httperror", code: error.result };
+ }
+ return { name: "nserror", code: error.result };
+ }
+ // It's probably an Error object, but it also could be some
+ // other object that may or may not override toString to do
+ // something useful.
+ let msg = String(error);
+ if (msg.startsWith("[object")) {
+ // Nothing useful in the default, check for a string "message" property.
+ if (typeof error.message == "string") {
+ msg = String(error.message);
+ } else {
+ // Hopefully it won't come to this...
+ msg = JSON.stringify(error);
+ }
+ }
+ return {
+ name: "unexpectederror",
+ error: ErrorSanitizer.cleanErrorMessage(msg),
+ };
+ }
+}
+
+export var SyncTelemetry = new SyncTelemetryImpl(ENGINES);
diff --git a/services/sync/modules/util.sys.mjs b/services/sync/modules/util.sys.mjs
new file mode 100644
index 0000000000..309eddefb2
--- /dev/null
+++ b/services/sync/modules/util.sys.mjs
@@ -0,0 +1,783 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import { Observers } from "resource://services-common/observers.sys.mjs";
+
+import { CommonUtils } from "resource://services-common/utils.sys.mjs";
+import { CryptoUtils } from "resource://services-crypto/utils.sys.mjs";
+
+import {
+ DEVICE_TYPE_DESKTOP,
+ MAXIMUM_BACKOFF_INTERVAL,
+ PREFS_BRANCH,
+ SYNC_KEY_DECODED_LENGTH,
+ SYNC_KEY_ENCODED_LENGTH,
+ WEAVE_VERSION,
+} from "resource://services-sync/constants.sys.mjs";
+
+import { Preferences } from "resource://gre/modules/Preferences.sys.mjs";
+import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
+
+const lazy = {};
+const FxAccountsCommon = ChromeUtils.import(
+ "resource://gre/modules/FxAccountsCommon.js"
+);
+
+XPCOMUtils.defineLazyServiceGetter(
+ lazy,
+ "cryptoSDR",
+ "@mozilla.org/login-manager/crypto/SDR;1",
+ "nsILoginManagerCrypto"
+);
+
+XPCOMUtils.defineLazyPreferenceGetter(
+ lazy,
+ "localDeviceType",
+ "services.sync.client.type",
+ DEVICE_TYPE_DESKTOP
+);
+
+/*
+ * Custom exception types.
+ */
+class LockException extends Error {
+ constructor(message) {
+ super(message);
+ this.name = "LockException";
+ }
+}
+
+class HMACMismatch extends Error {
+ constructor(message) {
+ super(message);
+ this.name = "HMACMismatch";
+ }
+}
+
+/*
+ * Utility functions
+ */
+export var Utils = {
+ // Aliases from CryptoUtils.
+ generateRandomBytesLegacy: CryptoUtils.generateRandomBytesLegacy,
+ computeHTTPMACSHA1: CryptoUtils.computeHTTPMACSHA1,
+ digestUTF8: CryptoUtils.digestUTF8,
+ digestBytes: CryptoUtils.digestBytes,
+ sha256: CryptoUtils.sha256,
+ hkdfExpand: CryptoUtils.hkdfExpand,
+ pbkdf2Generate: CryptoUtils.pbkdf2Generate,
+ getHTTPMACSHA1Header: CryptoUtils.getHTTPMACSHA1Header,
+
+ /**
+ * The string to use as the base User-Agent in Sync requests.
+ * This string will look something like
+ *
+ * Firefox/49.0a1 (Windows NT 6.1; WOW64; rv:46.0) FxSync/1.51.0.20160516142357.desktop
+ */
+ _userAgent: null,
+ get userAgent() {
+ if (!this._userAgent) {
+ let hph = Cc["@mozilla.org/network/protocol;1?name=http"].getService(
+ Ci.nsIHttpProtocolHandler
+ );
+ /* eslint-disable no-multi-spaces */
+ this._userAgent =
+ Services.appinfo.name +
+ "/" +
+ Services.appinfo.version + // Product.
+ " (" +
+ hph.oscpu +
+ ")" + // (oscpu)
+ " FxSync/" +
+ WEAVE_VERSION +
+ "." + // Sync.
+ Services.appinfo.appBuildID +
+ "."; // Build.
+ /* eslint-enable no-multi-spaces */
+ }
+ return this._userAgent + lazy.localDeviceType;
+ },
+
+ /**
+ * Wrap a [promise-returning] function to catch all exceptions and log them.
+ *
+ * @usage MyObj._catch = Utils.catch;
+ * MyObj.foo = function() { this._catch(func)(); }
+ *
+ * Optionally pass a function which will be called if an
+ * exception occurs.
+ */
+ catch(func, exceptionCallback) {
+ let thisArg = this;
+ return async function WrappedCatch() {
+ try {
+ return await func.call(thisArg);
+ } catch (ex) {
+ thisArg._log.debug(
+ "Exception calling " + (func.name || "anonymous function"),
+ ex
+ );
+ if (exceptionCallback) {
+ return exceptionCallback.call(thisArg, ex);
+ }
+ return null;
+ }
+ };
+ },
+
+ throwLockException(label) {
+ throw new LockException(`Could not acquire lock. Label: "${label}".`);
+ },
+
+ /**
+ * Wrap a [promise-returning] function to call lock before calling the function
+ * then unlock when it finishes executing or if it threw an error.
+ *
+ * @usage MyObj._lock = Utils.lock;
+ * MyObj.foo = async function() { await this._lock(func)(); }
+ */
+ lock(label, func) {
+ let thisArg = this;
+ return async function WrappedLock() {
+ if (!thisArg.lock()) {
+ Utils.throwLockException(label);
+ }
+
+ try {
+ return await func.call(thisArg);
+ } finally {
+ thisArg.unlock();
+ }
+ };
+ },
+
+ isLockException: function isLockException(ex) {
+ return ex instanceof LockException;
+ },
+
+ /**
+ * Wrap [promise-returning] functions to notify when it starts and
+ * finishes executing or if it threw an error.
+ *
+ * The message is a combination of a provided prefix, the local name, and
+ * the event. Possible events are: "start", "finish", "error". The subject
+ * is the function's return value on "finish" or the caught exception on
+ * "error". The data argument is the predefined data value.
+ *
+ * Example:
+ *
+ * @usage function MyObj(name) {
+ * this.name = name;
+ * this._notify = Utils.notify("obj:");
+ * }
+ * MyObj.prototype = {
+ * foo: function() this._notify("func", "data-arg", async function () {
+ * //...
+ * }(),
+ * };
+ */
+ notify(prefix) {
+ return function NotifyMaker(name, data, func) {
+ let thisArg = this;
+ let notify = function (state, subject) {
+ let mesg = prefix + name + ":" + state;
+ thisArg._log.trace("Event: " + mesg);
+ Observers.notify(mesg, subject, data);
+ };
+
+ return async function WrappedNotify() {
+ notify("start", null);
+ try {
+ let ret = await func.call(thisArg);
+ notify("finish", ret);
+ return ret;
+ } catch (ex) {
+ notify("error", ex);
+ throw ex;
+ }
+ };
+ };
+ },
+
+ /**
+ * GUIDs are 9 random bytes encoded with base64url (RFC 4648).
+ * That makes them 12 characters long with 72 bits of entropy.
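+   *
+   * Illustrative only: each call returns a fresh random value shaped like
+   * "0f8hzo4TOOcV".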
+ */
+ makeGUID: function makeGUID() {
+ return CommonUtils.encodeBase64URL(Utils.generateRandomBytesLegacy(9));
+ },
+
+ _base64url_regex: /^[-abcdefghijklmnopqrstuvwxyz0123456789_]{12}$/i,
+ checkGUID: function checkGUID(guid) {
+ return !!guid && this._base64url_regex.test(guid);
+ },
+
+ /**
+ * Add a simple getter/setter to an object that defers access of a property
+ * to an inner property.
+ *
+ * @param obj
+ * Object to add properties to defer in its prototype
+ * @param defer
+ * Property of obj to defer to
+ * @param prop
+ * Property name to defer (or an array of property names)
+ */
+ deferGetSet: function Utils_deferGetSet(obj, defer, prop) {
+ if (Array.isArray(prop)) {
+ return prop.map(prop => Utils.deferGetSet(obj, defer, prop));
+ }
+
+ let prot = obj.prototype;
+
+ // Create a getter if it doesn't exist yet
+ if (!prot.__lookupGetter__(prop)) {
+ prot.__defineGetter__(prop, function () {
+ return this[defer][prop];
+ });
+ }
+
+ // Create a setter if it doesn't exist yet
+ if (!prot.__lookupSetter__(prop)) {
+ prot.__defineSetter__(prop, function (val) {
+ this[defer][prop] = val;
+ });
+ }
+ },
+
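+  // Structural equality via a recursive key-by-key comparison. e.g.
+  // (illustrative) deepEquals({ a: 1, b: [2] }, { a: 1, b: [2] }) is true,
+  // while deepEquals({ a: 1 }, { a: "1" }) is false.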
+ deepEquals: function eq(a, b) {
+ // If they're triple equals, then it must be equals!
+ if (a === b) {
+ return true;
+ }
+
+ // If they weren't equal, they must be objects to be different
+ if (typeof a != "object" || typeof b != "object") {
+ return false;
+ }
+
+ // But null objects won't have properties to compare
+ if (a === null || b === null) {
+ return false;
+ }
+
+ // Make sure all of a's keys have a matching value in b
+ for (let k in a) {
+ if (!eq(a[k], b[k])) {
+ return false;
+ }
+ }
+
+ // Do the same for b's keys but skip those that we already checked
+ for (let k in b) {
+ if (!(k in a) && !eq(a[k], b[k])) {
+ return false;
+ }
+ }
+
+ return true;
+ },
+
+ // Generator and discriminator for HMAC exceptions.
+ // Split these out in case we want to make them richer in future, and to
+ // avoid inevitable confusion if the message changes.
+ throwHMACMismatch: function throwHMACMismatch(shouldBe, is) {
+ throw new HMACMismatch(
+ `Record SHA256 HMAC mismatch: should be ${shouldBe}, is ${is}`
+ );
+ },
+
+ isHMACMismatch: function isHMACMismatch(ex) {
+ return ex instanceof HMACMismatch;
+ },
+
+ /**
+ * Turn RFC 4648 base32 into our own user-friendly version.
+ * ABCDEFGHIJKLMNOPQRSTUVWXYZ234567
+ * becomes
+ * abcdefghijk8mn9pqrstuvwxyz234567
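+   *
+   * e.g. (illustrative) base32ToFriendly("ABCDLO23") -> "abcd8923";
+   * base32FromFriendly reverses the mapping.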
+ */
+ base32ToFriendly: function base32ToFriendly(input) {
+ return input.toLowerCase().replace(/l/g, "8").replace(/o/g, "9");
+ },
+
+ base32FromFriendly: function base32FromFriendly(input) {
+ return input.toUpperCase().replace(/8/g, "L").replace(/9/g, "O");
+ },
+
+ /**
+ * Key manipulation.
+ */
+
+ // Return an octet string in friendly base32 *with no trailing =*.
+ encodeKeyBase32: function encodeKeyBase32(keyData) {
+ return Utils.base32ToFriendly(CommonUtils.encodeBase32(keyData)).slice(
+ 0,
+ SYNC_KEY_ENCODED_LENGTH
+ );
+ },
+
+ decodeKeyBase32: function decodeKeyBase32(encoded) {
+ return CommonUtils.decodeBase32(
+ Utils.base32FromFriendly(Utils.normalizePassphrase(encoded))
+ ).slice(0, SYNC_KEY_DECODED_LENGTH);
+ },
+
+ jsonFilePath(...args) {
+ let [fileName] = args.splice(-1);
+
+ return PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ ...args,
+ `${fileName}.json`
+ );
+ },
+
+ /**
+ * Load a JSON file from disk in the profile directory.
+ *
+   * @param filePath
+   *        JSON file path to load from the profile. The loaded file will be
+   *        <filePath>.json.
+   * @param that
+   *        Object to use for logging.
+   *
+   * @return Promise<object|null>
+   *        Promise resolved with the parsed object, or null if the file
+   *        could not be read or parsed.
+ */
+ async jsonLoad(filePath, that) {
+ let path;
+ if (Array.isArray(filePath)) {
+ path = Utils.jsonFilePath(...filePath);
+ } else {
+ path = Utils.jsonFilePath(filePath);
+ }
+
+ if (that._log && that._log.trace) {
+ that._log.trace("Loading json from disk: " + path);
+ }
+
+ try {
+ return await IOUtils.readJSON(path);
+ } catch (e) {
+ if (!DOMException.isInstance(e) || e.name !== "NotFoundError") {
+ if (that._log) {
+ that._log.debug("Failed to load json", e);
+ }
+ }
+ return null;
+ }
+ },
+
+ /**
+ * Save a json-able object to disk in the profile directory.
+ *
+ * @param filePath
+   *        JSON file path to save to; the file written will be <filePath>.json.
+   * @param that
+   *        Object to use for logging.
+   * @param obj
+   *        Function to provide the json-able object to save. If this isn't a
+   *        function, it'll be used directly as the object to serialize.
+ *
+ * @return Promise<>
+ * Promise resolved when the write has been performed.
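+   *
+   * e.g. (illustrative) jsonSave("changes/history", this, obj) writes obj
+   * to <profileDir>/weave/changes/history.json.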
+ */
+ async jsonSave(filePath, that, obj) {
+ let path = PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ ...(filePath + ".json").split("/")
+ );
+ let dir = PathUtils.parent(path);
+
+ await IOUtils.makeDirectory(dir, { createAncestors: true });
+
+ if (that._log) {
+ that._log.trace("Saving json to disk: " + path);
+ }
+
+ let json = typeof obj == "function" ? obj.call(that) : obj;
+
+ return IOUtils.writeJSON(path, json);
+ },
+
+ /**
+ * Helper utility function to fit an array of records so that when serialized,
+   * they will be within payloadSizeMaxBytes. Returns a new array containing
+   * only the items that fit; any overflow is dropped.
+ *
+ * Note: This shouldn't be used for extremely large record sizes as
+ * it uses JSON.stringify, which could lead to a heavy performance hit.
+ * See Bug 1815151 for more details.
+ *
+ */
+ tryFitItems(records, payloadSizeMaxBytes) {
+ // Copy this so that callers don't have to do it in advance.
+ records = records.slice();
+ let encoder = Utils.utf8Encoder;
+ const computeSerializedSize = () =>
+ encoder.encode(JSON.stringify(records)).byteLength;
+ // Figure out how many records we can pack into a payload.
+ // We use byteLength here because the data is not encrypted in ascii yet.
+ let size = computeSerializedSize();
+ // See bug 535326 comment 8 for an explanation of the estimation
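+    // Roughly: the payload is later base64-encoded (a 4/3 size blowup), so
+    // only ~3/4 of the byte budget is usable, minus ~1500 bytes of headroom
+    // for the crypto/JSON envelope (our reading - see the bug for the
+    // authoritative rationale).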
+ const maxSerializedSize = (payloadSizeMaxBytes / 4) * 3 - 1500;
+ if (maxSerializedSize < 0) {
+ // This is probably due to a test, but it causes very bad behavior if a
+ // test causes this accidentally. We could throw, but there's an obvious/
+ // natural way to handle it, so we do that instead (otherwise we'd have a
+ // weird lower bound of ~1125b on the max record payload size).
+ return [];
+ }
+ if (size > maxSerializedSize) {
+ // Estimate a little more than the direct fraction to maximize packing
+ let cutoff = Math.ceil((records.length * maxSerializedSize) / size);
+ records = records.slice(0, cutoff + 1);
+
+ // Keep dropping off the last entry until the data fits.
+ while (computeSerializedSize() > maxSerializedSize) {
+ records.pop();
+ }
+ }
+ return records;
+ },
+
+ /**
+ * Move a json file in the profile directory. Will fail if a file exists at the
+ * destination.
+ *
+ * @returns a promise that resolves to undefined on success, or rejects on failure
+ *
+ * @param aFrom
+ * Current path to the JSON file saved on disk, relative to profileDir/weave
+ * .json will be appended to the file name.
+ * @param aTo
+ * New path to the JSON file saved on disk, relative to profileDir/weave
+ * .json will be appended to the file name.
+ * @param that
+ * Object to use for logging
+ */
+ jsonMove(aFrom, aTo, that) {
+ let pathFrom = PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ ...(aFrom + ".json").split("/")
+ );
+ let pathTo = PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ ...(aTo + ".json").split("/")
+ );
+ if (that._log) {
+ that._log.trace("Moving " + pathFrom + " to " + pathTo);
+ }
+ return IOUtils.move(pathFrom, pathTo, { noOverwrite: true });
+ },
+
+ /**
+ * Removes a json file in the profile directory.
+ *
+ * @returns a promise that resolves to undefined on success, or rejects on failure
+ *
+ * @param filePath
+ * Current path to the JSON file saved on disk, relative to profileDir/weave
+ * .json will be appended to the file name.
+ * @param that
+ * Object to use for logging
+ */
+ jsonRemove(filePath, that) {
+ let path = PathUtils.join(
+ PathUtils.profileDir,
+ "weave",
+ ...(filePath + ".json").split("/")
+ );
+ if (that._log) {
+ that._log.trace("Deleting " + path);
+ }
+ return IOUtils.remove(path, { ignoreAbsent: true });
+ },
+
+ /**
+ * The following are the methods supported for UI use:
+ *
+ * * isPassphrase:
+ * determines whether a string is either a normalized or presentable
+ * passphrase.
+ * * normalizePassphrase:
+ * take a presentable passphrase and reduce it to a normalized
+ * representation for storage. normalizePassphrase can safely be called
+ * on normalized input.
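+   *
+   * e.g. (illustrative)
+   *   normalizePassphrase("a-bcdef-ghijk-mnpqr-stuvw-xyz23")
+   * returns "abcdefghijkmnpqrstuvwxyz23", which isPassphrase accepts.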
+ */
+
+ isPassphrase(s) {
+ if (s) {
+ return /^[abcdefghijkmnpqrstuvwxyz23456789]{26}$/.test(
+ Utils.normalizePassphrase(s)
+ );
+ }
+ return false;
+ },
+
+ normalizePassphrase: function normalizePassphrase(pp) {
+ // Short var name... have you seen the lines below?!
+ // Allow leading and trailing whitespace.
+ pp = pp.trim().toLowerCase();
+
+ // 20-char sync key.
+ if (pp.length == 23 && [5, 11, 17].every(i => pp[i] == "-")) {
+ return (
+ pp.slice(0, 5) + pp.slice(6, 11) + pp.slice(12, 17) + pp.slice(18, 23)
+ );
+ }
+
+ // "Modern" 26-char key.
+ if (pp.length == 31 && [1, 7, 13, 19, 25].every(i => pp[i] == "-")) {
+ return (
+ pp.slice(0, 1) +
+ pp.slice(2, 7) +
+ pp.slice(8, 13) +
+ pp.slice(14, 19) +
+ pp.slice(20, 25) +
+ pp.slice(26, 31)
+ );
+ }
+
+ // Something else -- just return.
+ return pp;
+ },
+
+ /**
+ * Create an array like the first but without elements of the second. Reuse
+ * arrays if possible.
+ */
+ arraySub: function arraySub(minuend, subtrahend) {
+ if (!minuend.length || !subtrahend.length) {
+ return minuend;
+ }
+ let setSubtrahend = new Set(subtrahend);
+ return minuend.filter(i => !setSubtrahend.has(i));
+ },
+
+ /**
+ * Build the union of two arrays. Reuse arrays if possible.
+ */
+ arrayUnion: function arrayUnion(foo, bar) {
+ if (!foo.length) {
+ return bar;
+ }
+ if (!bar.length) {
+ return foo;
+ }
+ return foo.concat(Utils.arraySub(bar, foo));
+ },
+
+ /**
+ * Add all the items in `items` to the provided Set in-place.
+ *
+ * @return The provided set.
+ */
+ setAddAll(set, items) {
+ for (let item of items) {
+ set.add(item);
+ }
+ return set;
+ },
+
+ /**
+   * Delete every item in `items` from the provided Set in-place.
+ *
+ * @return The provided set.
+ */
+ setDeleteAll(set, items) {
+ for (let item of items) {
+ set.delete(item);
+ }
+ return set;
+ },
+
+ /**
+ * Take the first `size` items from the Set `items`.
+ *
+ * @return A Set of size at most `size`
+ */
+ subsetOfSize(items, size) {
+ let result = new Set();
+ let count = 0;
+ for (let item of items) {
+ if (count++ == size) {
+ return result;
+ }
+ result.add(item);
+ }
+ return result;
+ },
+
+ bind2: function Async_bind2(object, method) {
+ return function innerBind() {
+ return method.apply(object, arguments);
+ };
+ },
+
+ /**
+ * Is there a master password configured and currently locked?
+ */
+ mpLocked() {
+ return !lazy.cryptoSDR.isLoggedIn;
+ },
+
+ // If Master Password is enabled and locked, present a dialog to unlock it.
+ // Return whether the system is unlocked.
+ ensureMPUnlocked() {
+ if (lazy.cryptoSDR.uiBusy) {
+ return false;
+ }
+ try {
+ lazy.cryptoSDR.encrypt("bacon");
+ return true;
+ } catch (e) {}
+ return false;
+ },
+
+ /**
+ * Return a value for a backoff interval. Maximum is eight hours, unless
+ * Status.backoffInterval is higher.
+ *
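+   * Worked example (illustrative): with attempts = 3 and baseInterval =
+   * 60000ms, the random term lands in [60000, 120000), so the result is in
+   * [180000, 360000) ms - i.e. 3-6 minutes - before the cap and the
+   * statusInterval floor are applied.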
+ */
+ calculateBackoff: function calculateBackoff(
+ attempts,
+ baseInterval,
+ statusInterval
+ ) {
+ let backoffInterval =
+ attempts * (Math.floor(Math.random() * baseInterval) + baseInterval);
+ return Math.max(
+ Math.min(backoffInterval, MAXIMUM_BACKOFF_INTERVAL),
+ statusInterval
+ );
+ },
+
+ /**
+ * Return a set of hostnames (including the protocol) which may have
+ * credentials for sync itself stored in the login manager.
+ *
+ * In general, these hosts will not have their passwords synced, will be
+ * reset when we drop sync credentials, etc.
+ */
+ getSyncCredentialsHosts() {
+ let result = new Set();
+ // the FxA host
+ result.add(FxAccountsCommon.FXA_PWDMGR_HOST);
+ // We used to include the FxA hosts (hence the Set() result) but we now
+ // don't give them special treatment (hence the Set() with exactly 1 item)
+ return result;
+ },
+
+ /**
+ * Helper to implement a more efficient version of fairly common pattern:
+ *
+ * Utils.defineLazyIDProperty(this, "syncID", "services.sync.client.syncID")
+ *
+ * is equivalent to (but more efficient than) the following:
+ *
+ * Foo.prototype = {
+ * ...
+ * get syncID() {
+ * let syncID = Svc.Prefs.get("client.syncID", "");
+ * return syncID == "" ? this.syncID = Utils.makeGUID() : syncID;
+ * },
+ * set syncID(value) {
+ * Svc.Prefs.set("client.syncID", value);
+ * },
+ * ...
+ * };
+ */
+ defineLazyIDProperty(object, propName, prefName) {
+ // An object that exists to be the target of the lazy pref getter.
+ // We can't use `object` (at least, not using `propName`) since XPCOMUtils
+ // will stomp on any setter we define.
+ const storage = {};
+ XPCOMUtils.defineLazyPreferenceGetter(storage, "value", prefName, "");
+ Object.defineProperty(object, propName, {
+ configurable: true,
+ enumerable: true,
+ get() {
+ let value = storage.value;
+ if (!value) {
+ value = Utils.makeGUID();
+ Services.prefs.setStringPref(prefName, value);
+ }
+ return value;
+ },
+ set(value) {
+ Services.prefs.setStringPref(prefName, value);
+ },
+ });
+ },
+
+ getDeviceType() {
+ return lazy.localDeviceType;
+ },
+
+ formatTimestamp(date) {
+ // Format timestamp as: "%Y-%m-%d %H:%M:%S"
+ let year = String(date.getFullYear());
+ let month = String(date.getMonth() + 1).padStart(2, "0");
+ let day = String(date.getDate()).padStart(2, "0");
+ let hours = String(date.getHours()).padStart(2, "0");
+ let minutes = String(date.getMinutes()).padStart(2, "0");
+ let seconds = String(date.getSeconds()).padStart(2, "0");
+
+ return `${year}-${month}-${day} ${hours}:${minutes}:${seconds}`;
+ },
+
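+  // Depth-first pre-order walk yielding [node, parent] pairs, skipping the
+  // root itself. Illustrative usage (tree shape is assumed):
+  //   for (let [node, parent] of Utils.walkTree(someTree)) {
+  //     visit(node, parent);
+  //   }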
+ *walkTree(tree, parent = null) {
+ if (tree) {
+ // Skip root node
+ if (parent) {
+ yield [tree, parent];
+ }
+ if (tree.children) {
+ for (let child of tree.children) {
+ yield* Utils.walkTree(child, tree);
+ }
+ }
+ }
+ },
+};
+
+/**
+ * A subclass of Set that serializes as an Array when passed to JSON.stringify.
+ */
+export class SerializableSet extends Set {
+ toJSON() {
+ return Array.from(this);
+ }
+}
+
+XPCOMUtils.defineLazyGetter(Utils, "_utf8Converter", function () {
+ let converter = Cc[
+ "@mozilla.org/intl/scriptableunicodeconverter"
+ ].createInstance(Ci.nsIScriptableUnicodeConverter);
+ converter.charset = "UTF-8";
+ return converter;
+});
+
+XPCOMUtils.defineLazyGetter(Utils, "utf8Encoder", () => new TextEncoder());
+
+/*
+ * Commonly-used services
+ */
+export var Svc = {};
+
+Svc.Prefs = new Preferences(PREFS_BRANCH);
+Svc.Obs = Observers;
+
+Svc.Obs.add("xpcom-shutdown", function () {
+ for (let name in Svc) {
+ delete Svc[name];
+ }
+});