author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-19 01:47:29 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-19 01:47:29 +0000
commit     0ebf5bdf043a27fd3dfb7f92e0cb63d88954c44d (patch)
tree       a31f07c9bcca9d56ce61e9a1ffd30ef350d513aa /third_party/rust/tabs/src/sync
parent     Initial commit. (diff)
download   firefox-esr-0ebf5bdf043a27fd3dfb7f92e0cb63d88954c44d.tar.xz
           firefox-esr-0ebf5bdf043a27fd3dfb7f92e0cb63d88954c44d.zip
Adding upstream version 115.8.0esr. (upstream/115.8.0esr)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/tabs/src/sync')
-rw-r--r--   third_party/rust/tabs/src/sync/bridge.rs      436
-rw-r--r--   third_party/rust/tabs/src/sync/engine.rs      543
-rw-r--r--   third_party/rust/tabs/src/sync/full_sync.rs    70
-rw-r--r--   third_party/rust/tabs/src/sync/mod.rs          35
-rw-r--r--   third_party/rust/tabs/src/sync/record.rs       97
5 files changed, 1181 insertions, 0 deletions
diff --git a/third_party/rust/tabs/src/sync/bridge.rs b/third_party/rust/tabs/src/sync/bridge.rs new file mode 100644 index 0000000000..6e37a0cf5c --- /dev/null +++ b/third_party/rust/tabs/src/sync/bridge.rs @@ -0,0 +1,436 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +use crate::error::TabsApiError; +use crate::sync::engine::TabsSyncImpl; +use crate::TabsStore; +use anyhow::Result; +use std::sync::{Arc, Mutex}; +use sync15::bso::{IncomingBso, OutgoingBso}; +use sync15::engine::{ApplyResults, BridgedEngine, CollSyncIds, EngineSyncAssociation}; +use sync15::{ClientData, ServerTimestamp}; +use sync_guid::Guid as SyncGuid; + +impl TabsStore { + // Returns a bridged sync engine for Desktop for this store. + pub fn bridged_engine(self: Arc<Self>) -> Arc<TabsBridgedEngine> { + let bridge_impl = crate::sync::bridge::BridgedEngineImpl::new(&self); + // This is a concrete struct exposed via uniffi. + let concrete = TabsBridgedEngine::new(bridge_impl); + Arc::new(concrete) + } +} + +/// A bridged engine implements all the methods needed to make the +/// `storage.sync` store work with Desktop's Sync implementation. +/// Conceptually it's very similar to our SyncEngine, and once we have SyncEngine +/// and BridgedEngine using the same types we can probably combine them (or at least +/// implement this bridged engine purely in terms of SyncEngine) +/// See also #2841 and #5139 +pub struct BridgedEngineImpl { + sync_impl: Mutex<TabsSyncImpl>, + incoming: Mutex<Vec<IncomingBso>>, +} + +impl BridgedEngineImpl { + /// Creates a bridged engine for syncing. + pub fn new(store: &Arc<TabsStore>) -> Self { + Self { + sync_impl: Mutex::new(TabsSyncImpl::new(store.clone())), + incoming: Mutex::default(), + } + } +} + +impl BridgedEngine for BridgedEngineImpl { + fn last_sync(&self) -> Result<i64> { + Ok(self + .sync_impl + .lock() + .unwrap() + .get_last_sync()? + .unwrap_or_default() + .as_millis()) + } + + fn set_last_sync(&self, last_sync_millis: i64) -> Result<()> { + self.sync_impl + .lock() + .unwrap() + .set_last_sync(ServerTimestamp::from_millis(last_sync_millis))?; + Ok(()) + } + + fn sync_id(&self) -> Result<Option<String>> { + Ok(match self.sync_impl.lock().unwrap().get_sync_assoc()? { + EngineSyncAssociation::Connected(id) => Some(id.coll.to_string()), + EngineSyncAssociation::Disconnected => None, + }) + } + + fn reset_sync_id(&self) -> Result<String> { + let new_id = SyncGuid::random().to_string(); + let new_coll_ids = CollSyncIds { + global: SyncGuid::empty(), + coll: new_id.clone().into(), + }; + self.sync_impl + .lock() + .unwrap() + .reset(&EngineSyncAssociation::Connected(new_coll_ids))?; + Ok(new_id) + } + + fn ensure_current_sync_id(&self, sync_id: &str) -> Result<String> { + let mut sync_impl = self.sync_impl.lock().unwrap(); + let assoc = sync_impl.get_sync_assoc()?; + if matches!(assoc, EngineSyncAssociation::Connected(c) if c.coll == sync_id) { + log::debug!("ensure_current_sync_id is current"); + } else { + let new_coll_ids = CollSyncIds { + global: SyncGuid::empty(), + coll: sync_id.into(), + }; + sync_impl.reset(&EngineSyncAssociation::Connected(new_coll_ids))?; + } + Ok(sync_id.to_string()) + } + + fn prepare_for_sync(&self, client_data: &str) -> Result<()> { + let data: ClientData = serde_json::from_str(client_data)?; + Ok(self.sync_impl.lock().unwrap().prepare_for_sync(data)?) 
+ } + + fn sync_started(&self) -> Result<()> { + // This is a no-op for the Tabs Engine + Ok(()) + } + + fn store_incoming(&self, incoming: Vec<IncomingBso>) -> Result<()> { + // Store the incoming payload in memory so we can use it in apply + *(self.incoming.lock().unwrap()) = incoming; + Ok(()) + } + + fn apply(&self) -> Result<ApplyResults> { + let mut incoming = self.incoming.lock().unwrap(); + // We've a reference to a Vec<> but it's owned by the mutex - swap the mutex owned + // value for an empty vec so we can consume the original. + let mut records = Vec::new(); + std::mem::swap(&mut records, &mut *incoming); + let mut telem = sync15::telemetry::Engine::new("tabs"); + + let mut sync_impl = self.sync_impl.lock().unwrap(); + let outgoing = sync_impl.apply_incoming(records, &mut telem)?; + + Ok(ApplyResults { + records: outgoing, + num_reconciled: Some(0), + }) + } + + fn set_uploaded(&self, server_modified_millis: i64, ids: &[SyncGuid]) -> Result<()> { + Ok(self + .sync_impl + .lock() + .unwrap() + .sync_finished(ServerTimestamp::from_millis(server_modified_millis), ids)?) + } + + fn sync_finished(&self) -> Result<()> { + *(self.incoming.lock().unwrap()) = Vec::default(); + Ok(()) + } + + fn reset(&self) -> Result<()> { + self.sync_impl + .lock() + .unwrap() + .reset(&EngineSyncAssociation::Disconnected)?; + Ok(()) + } + + fn wipe(&self) -> Result<()> { + self.sync_impl.lock().unwrap().wipe()?; + Ok(()) + } +} + +// This is for uniffi to expose, and does nothing than delegate back to the trait. +pub struct TabsBridgedEngine { + bridge_impl: BridgedEngineImpl, +} + +impl TabsBridgedEngine { + pub fn new(bridge_impl: BridgedEngineImpl) -> Self { + Self { bridge_impl } + } + + pub fn last_sync(&self) -> Result<i64> { + self.bridge_impl.last_sync() + } + + pub fn set_last_sync(&self, last_sync: i64) -> Result<()> { + self.bridge_impl.set_last_sync(last_sync) + } + + pub fn sync_id(&self) -> Result<Option<String>> { + self.bridge_impl.sync_id() + } + + pub fn reset_sync_id(&self) -> Result<String> { + self.bridge_impl.reset_sync_id() + } + + pub fn ensure_current_sync_id(&self, sync_id: &str) -> Result<String> { + self.bridge_impl.ensure_current_sync_id(sync_id) + } + + pub fn prepare_for_sync(&self, client_data: &str) -> Result<()> { + self.bridge_impl.prepare_for_sync(client_data) + } + + pub fn sync_started(&self) -> Result<()> { + self.bridge_impl.sync_started() + } + + // Decode the JSON-encoded IncomingBso's that UniFFI passes to us + fn convert_incoming_bsos(&self, incoming: Vec<String>) -> Result<Vec<IncomingBso>> { + let mut bsos = Vec::with_capacity(incoming.len()); + for inc in incoming { + bsos.push(serde_json::from_str::<IncomingBso>(&inc)?); + } + Ok(bsos) + } + + // Encode OutgoingBso's into JSON for UniFFI + fn convert_outgoing_bsos(&self, outgoing: Vec<OutgoingBso>) -> Result<Vec<String>> { + let mut bsos = Vec::with_capacity(outgoing.len()); + for e in outgoing { + bsos.push(serde_json::to_string(&e)?); + } + Ok(bsos) + } + + pub fn store_incoming(&self, incoming: Vec<String>) -> Result<()> { + self.bridge_impl + .store_incoming(self.convert_incoming_bsos(incoming)?) 
+ } + + pub fn apply(&self) -> Result<Vec<String>> { + let apply_results = self.bridge_impl.apply()?; + self.convert_outgoing_bsos(apply_results.records) + } + + pub fn set_uploaded(&self, server_modified_millis: i64, guids: Vec<SyncGuid>) -> Result<()> { + self.bridge_impl + .set_uploaded(server_modified_millis, &guids) + } + + pub fn sync_finished(&self) -> Result<()> { + self.bridge_impl.sync_finished() + } + + pub fn reset(&self) -> Result<()> { + self.bridge_impl.reset() + } + + pub fn wipe(&self) -> Result<()> { + self.bridge_impl.wipe() + } +} + +impl From<anyhow::Error> for TabsApiError { + fn from(value: anyhow::Error) -> Self { + TabsApiError::UnexpectedTabsError { + reason: value.to_string(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::storage::{RemoteTab, TABS_CLIENT_TTL}; + use crate::sync::record::TabsRecordTab; + use serde_json::json; + use std::collections::HashMap; + use sync15::{ClientData, DeviceType, RemoteClient}; + + // A copy of the normal "engine" tests but which go via the bridge + #[test] + fn test_sync_via_bridge() { + env_logger::try_init().ok(); + + let store = Arc::new(TabsStore::new_with_mem_path("test-bridge_incoming")); + + // Set some local tabs for our device. + let my_tabs = vec![ + RemoteTab { + title: "my first tab".to_string(), + url_history: vec!["http://1.com".to_string()], + icon: None, + last_used: 2, + }, + RemoteTab { + title: "my second tab".to_string(), + url_history: vec!["http://2.com".to_string()], + icon: None, + last_used: 1, + }, + ]; + store.set_local_tabs(my_tabs.clone()); + + let bridge = store.bridged_engine(); + + let client_data = ClientData { + local_client_id: "my-device".to_string(), + recent_clients: HashMap::from([ + ( + "my-device".to_string(), + RemoteClient { + fxa_device_id: None, + device_name: "my device".to_string(), + device_type: sync15::DeviceType::Unknown, + }, + ), + ( + "device-no-tabs".to_string(), + RemoteClient { + fxa_device_id: None, + device_name: "device with no tabs".to_string(), + device_type: DeviceType::Unknown, + }, + ), + ( + "device-with-a-tab".to_string(), + RemoteClient { + fxa_device_id: None, + device_name: "device with a tab".to_string(), + device_type: DeviceType::Unknown, + }, + ), + ]), + }; + bridge + .prepare_for_sync(&serde_json::to_string(&client_data).unwrap()) + .expect("should work"); + + let records = vec![ + // my-device should be ignored by sync - here it is acting as what our engine last + // wrote, but the actual tabs in our store we set above are what should be used. + json!({ + "id": "my-device", + "clientName": "my device", + "tabs": [{ + "title": "the title", + "urlHistory": [ + "https://mozilla.org/" + ], + "icon": "https://mozilla.org/icon", + "lastUsed": 1643764207 + }] + }), + json!({ + "id": "device-no-tabs", + "clientName": "device with no tabs", + "tabs": [], + }), + json!({ + "id": "device-with-a-tab", + "clientName": "device with a tab", + "tabs": [{ + "title": "the title", + "urlHistory": [ + "https://mozilla.org/" + ], + "icon": "https://mozilla.org/icon", + "lastUsed": 1643764207 + }] + }), + // This has the main payload as OK but the tabs part invalid. + json!({ + "id": "device-with-invalid-tab", + "clientName": "device with a tab", + "tabs": [{ + "foo": "bar", + }] + }), + // We want this to be a valid payload but an invalid tab - so it needs an ID. 
+ json!({ + "id": "invalid-tab", + "foo": "bar" + }), + ]; + + let mut incoming = Vec::new(); + for record in records { + // Annoyingly we can't use `IncomingEnvelope` directly as it intentionally doesn't + // support Serialize - so need to use explicit json. + let envelope = json!({ + "id": record.get("id"), + "modified": 0, + "payload": serde_json::to_string(&record).unwrap(), + }); + incoming.push(serde_json::to_string(&envelope).unwrap()); + } + + bridge.store_incoming(incoming).expect("should store"); + + let out = bridge.apply().expect("should apply"); + + assert_eq!(out.len(), 1); + let ours = serde_json::from_str::<serde_json::Value>(&out[0]).unwrap(); + // As above, can't use `OutgoingEnvelope` as it doesn't Deserialize. + // First, convert my_tabs from the local `RemoteTab` to the Sync specific `TabsRecord` + let expected_tabs: Vec<TabsRecordTab> = + my_tabs.into_iter().map(|t| t.to_record_tab()).collect(); + let expected = json!({ + "id": "my-device".to_string(), + "payload": json!({ + "id": "my-device".to_string(), + "clientName": "my device", + "tabs": serde_json::to_value(expected_tabs).unwrap(), + }).to_string(), + "ttl": TABS_CLIENT_TTL, + }); + + assert_eq!(ours, expected); + bridge.set_uploaded(1234, vec![]).unwrap(); + assert_eq!(bridge.last_sync().unwrap(), 1234); + } + + #[test] + fn test_sync_meta() { + env_logger::try_init().ok(); + + let store = Arc::new(TabsStore::new_with_mem_path("test-meta")); + let bridge = store.bridged_engine(); + + // Should not error or panic + assert_eq!(bridge.last_sync().unwrap(), 0); + bridge.set_last_sync(3).unwrap(); + assert_eq!(bridge.last_sync().unwrap(), 3); + + assert!(bridge.sync_id().unwrap().is_none()); + + bridge.ensure_current_sync_id("some_guid").unwrap(); + assert_eq!(bridge.sync_id().unwrap(), Some("some_guid".to_string())); + // changing the sync ID should reset the timestamp + assert_eq!(bridge.last_sync().unwrap(), 0); + bridge.set_last_sync(3).unwrap(); + + bridge.reset_sync_id().unwrap(); + // should now be a random guid. + assert_ne!(bridge.sync_id().unwrap(), Some("some_guid".to_string())); + // should have reset the last sync timestamp. + assert_eq!(bridge.last_sync().unwrap(), 0); + bridge.set_last_sync(3).unwrap(); + + // `reset` clears the guid and the timestamp + bridge.reset().unwrap(); + assert_eq!(bridge.last_sync().unwrap(), 0); + assert!(bridge.sync_id().unwrap().is_none()); + } +} diff --git a/third_party/rust/tabs/src/sync/engine.rs b/third_party/rust/tabs/src/sync/engine.rs new file mode 100644 index 0000000000..bc0978cbf2 --- /dev/null +++ b/third_party/rust/tabs/src/sync/engine.rs @@ -0,0 +1,543 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +use crate::schema; +use crate::storage::{ClientRemoteTabs, RemoteTab, TABS_CLIENT_TTL}; +use crate::store::TabsStore; +use crate::sync::record::{TabsRecord, TabsRecordTab}; +use crate::Result; +use std::collections::HashMap; +use std::sync::{Arc, Mutex, Weak}; +use sync15::bso::{IncomingBso, OutgoingBso, OutgoingEnvelope}; +use sync15::engine::{ + CollSyncIds, CollectionRequest, EngineSyncAssociation, IncomingChangeset, OutgoingChangeset, + SyncEngine, SyncEngineId, +}; +use sync15::{telemetry, ClientData, DeviceType, RemoteClient, ServerTimestamp}; +use sync_guid::Guid; + +// Our "sync manager" will use whatever is stashed here. +lazy_static::lazy_static! 
{ + // Mutex: just taken long enough to update the inner stuff + static ref STORE_FOR_MANAGER: Mutex<Weak<TabsStore>> = Mutex::new(Weak::new()); +} + +/// Called by the sync manager to get a sync engine via the store previously +/// registered with the sync manager. +pub fn get_registered_sync_engine(engine_id: &SyncEngineId) -> Option<Box<dyn SyncEngine>> { + let weak = STORE_FOR_MANAGER.lock().unwrap(); + match weak.upgrade() { + None => None, + Some(store) => match engine_id { + SyncEngineId::Tabs => Some(Box::new(TabsEngine::new(Arc::clone(&store)))), + // panicing here seems reasonable - it's a static error if this + // it hit, not something that runtime conditions can influence. + _ => unreachable!("can't provide unknown engine: {}", engine_id), + }, + } +} + +impl ClientRemoteTabs { + pub(crate) fn from_record_with_remote_client( + client_id: String, + last_modified: ServerTimestamp, + remote_client: &RemoteClient, + record: TabsRecord, + ) -> Self { + Self { + client_id, + client_name: remote_client.device_name.clone(), + device_type: remote_client.device_type, + last_modified: last_modified.as_millis(), + remote_tabs: record.tabs.iter().map(RemoteTab::from_record_tab).collect(), + } + } + + // Note that this should die as part of https://github.com/mozilla/application-services/issues/5199 + // If we don't have a `RemoteClient` record, then we don't know whether the ID passed here is + // the fxa_device_id (which is must be) or the client_id (which it will be if this ends up being + // called for desktop records, where client_id != fxa_device_id) + pub(crate) fn from_record( + client_id: String, + last_modified: ServerTimestamp, + record: TabsRecord, + ) -> Self { + Self { + client_id, + client_name: record.client_name, + device_type: DeviceType::Unknown, + last_modified: last_modified.as_millis(), + remote_tabs: record.tabs.iter().map(RemoteTab::from_record_tab).collect(), + } + } + fn to_record(&self) -> TabsRecord { + TabsRecord { + id: self.client_id.clone(), + client_name: self.client_name.clone(), + tabs: self + .remote_tabs + .iter() + .map(RemoteTab::to_record_tab) + .collect(), + } + } +} + +impl RemoteTab { + pub(crate) fn from_record_tab(tab: &TabsRecordTab) -> Self { + Self { + title: tab.title.clone(), + url_history: tab.url_history.clone(), + icon: tab.icon.clone(), + last_used: tab.last_used.checked_mul(1000).unwrap_or_default(), + } + } + pub(super) fn to_record_tab(&self) -> TabsRecordTab { + TabsRecordTab { + title: self.title.clone(), + url_history: self.url_history.clone(), + icon: self.icon.clone(), + last_used: self.last_used.checked_div(1000).unwrap_or_default(), + } + } +} + +// This is the implementation of syncing, which is used by the 2 different "sync engines" +// (We hope to get these 2 engines even closer in the future, but for now, we suck this up) +pub struct TabsSyncImpl { + pub(super) store: Arc<TabsStore>, + pub(super) local_id: String, +} + +impl TabsSyncImpl { + pub fn new(store: Arc<TabsStore>) -> Self { + Self { + store, + local_id: Default::default(), + } + } + + pub fn prepare_for_sync(&mut self, client_data: ClientData) -> Result<()> { + let mut storage = self.store.storage.lock().unwrap(); + // We only know the client list at sync time, but need to return tabs potentially + // at any time -- so we store the clients in the meta table to be able to properly + // return a ClientRemoteTab struct + storage.put_meta( + schema::REMOTE_CLIENTS_KEY, + &serde_json::to_string(&client_data.recent_clients)?, + )?; + self.local_id = 
client_data.local_client_id; + Ok(()) + } + + pub fn apply_incoming( + &mut self, + inbound: Vec<IncomingBso>, + telem: &mut telemetry::Engine, + ) -> Result<Vec<OutgoingBso>> { + let local_id = self.local_id.clone(); + let mut remote_tabs = Vec::with_capacity(inbound.len()); + let mut incoming_telemetry = telemetry::EngineIncoming::new(); + + let remote_clients: HashMap<String, RemoteClient> = { + let mut storage = self.store.storage.lock().unwrap(); + match storage.get_meta::<String>(schema::REMOTE_CLIENTS_KEY)? { + None => HashMap::default(), + Some(json) => serde_json::from_str(&json).unwrap(), + } + }; + for incoming in inbound { + if incoming.envelope.id == local_id { + // That's our own record, ignore it. + continue; + } + let modified = incoming.envelope.modified; + let record = match incoming.into_content::<TabsRecord>().content() { + Some(record) => record, + None => { + // Invalid record or a "tombstone" which tabs don't have. + log::warn!("Ignoring incoming invalid tab"); + incoming_telemetry.failed(1); + continue; + } + }; + remote_tabs.push((record, modified)); + } + + // We want to keep the mutex for as short as possible + let local_tabs = { + let mut storage = self.store.storage.lock().unwrap(); + // In desktop we might end up here with zero records when doing a quick-write, in + // which case we don't want to wipe the DB. + if !remote_tabs.is_empty() { + storage.replace_remote_tabs(remote_tabs)?; + } + storage.remove_stale_clients()?; + storage.prepare_local_tabs_for_upload() + }; + let outgoing = if let Some(local_tabs) = local_tabs { + let (client_name, device_type) = remote_clients + .get(&local_id) + .map(|client| (client.device_name.clone(), client.device_type)) + .unwrap_or_else(|| (String::new(), DeviceType::Unknown)); + let local_record = ClientRemoteTabs { + client_id: local_id.clone(), + client_name, + device_type, + last_modified: 0, // ignored for outgoing records. + remote_tabs: local_tabs.to_vec(), + }; + log::trace!("outgoing {:?}", local_record); + let envelope = OutgoingEnvelope { + id: local_id.into(), + ttl: Some(TABS_CLIENT_TTL), + ..Default::default() + }; + vec![OutgoingBso::from_content( + envelope, + local_record.to_record(), + )?] + } else { + vec![] + }; + telem.incoming(incoming_telemetry); + Ok(outgoing) + } + + pub fn sync_finished( + &mut self, + new_timestamp: ServerTimestamp, + records_synced: &[Guid], + ) -> Result<()> { + log::info!( + "sync completed after uploading {} records", + records_synced.len() + ); + self.set_last_sync(new_timestamp)?; + Ok(()) + } + + pub fn reset(&mut self, assoc: &EngineSyncAssociation) -> Result<()> { + self.set_last_sync(ServerTimestamp(0))?; + let mut storage = self.store.storage.lock().unwrap(); + storage.delete_meta(schema::REMOTE_CLIENTS_KEY)?; + storage.wipe_remote_tabs()?; + match assoc { + EngineSyncAssociation::Disconnected => { + storage.delete_meta(schema::GLOBAL_SYNCID_META_KEY)?; + storage.delete_meta(schema::COLLECTION_SYNCID_META_KEY)?; + } + EngineSyncAssociation::Connected(ids) => { + storage.put_meta(schema::GLOBAL_SYNCID_META_KEY, &ids.global.to_string())?; + storage.put_meta(schema::COLLECTION_SYNCID_META_KEY, &ids.coll.to_string())?; + } + }; + Ok(()) + } + + pub fn wipe(&mut self) -> Result<()> { + self.reset(&EngineSyncAssociation::Disconnected)?; + // not clear why we need to wipe the local tabs - the app is just going + // to re-add them? 
+ self.store.storage.lock().unwrap().wipe_local_tabs(); + Ok(()) + } + + pub fn get_sync_assoc(&self) -> Result<EngineSyncAssociation> { + let mut storage = self.store.storage.lock().unwrap(); + let global = storage.get_meta::<String>(schema::GLOBAL_SYNCID_META_KEY)?; + let coll = storage.get_meta::<String>(schema::COLLECTION_SYNCID_META_KEY)?; + Ok(if let (Some(global), Some(coll)) = (global, coll) { + EngineSyncAssociation::Connected(CollSyncIds { + global: Guid::from_string(global), + coll: Guid::from_string(coll), + }) + } else { + EngineSyncAssociation::Disconnected + }) + } + + pub fn set_last_sync(&self, last_sync: ServerTimestamp) -> Result<()> { + let mut storage = self.store.storage.lock().unwrap(); + log::debug!("Updating last sync to {}", last_sync); + let last_sync_millis = last_sync.as_millis(); + storage.put_meta(schema::LAST_SYNC_META_KEY, &last_sync_millis) + } + + pub fn get_last_sync(&self) -> Result<Option<ServerTimestamp>> { + let mut storage = self.store.storage.lock().unwrap(); + let millis = storage.get_meta::<i64>(schema::LAST_SYNC_META_KEY)?; + Ok(millis.map(ServerTimestamp)) + } +} + +// This is the "SyncEngine" used when syncing via the Sync Manager. +pub struct TabsEngine { + pub sync_impl: Mutex<TabsSyncImpl>, +} + +impl TabsEngine { + pub fn new(store: Arc<TabsStore>) -> Self { + Self { + sync_impl: Mutex::new(TabsSyncImpl::new(store)), + } + } +} + +impl SyncEngine for TabsEngine { + fn collection_name(&self) -> std::borrow::Cow<'static, str> { + "tabs".into() + } + + fn prepare_for_sync(&self, get_client_data: &dyn Fn() -> ClientData) -> anyhow::Result<()> { + Ok(self + .sync_impl + .lock() + .unwrap() + .prepare_for_sync(get_client_data())?) + } + + fn apply_incoming( + &self, + inbound: Vec<IncomingChangeset>, + telem: &mut telemetry::Engine, + ) -> anyhow::Result<OutgoingChangeset> { + assert_eq!(inbound.len(), 1, "only requested one set of records"); + let inbound = inbound.into_iter().next().unwrap(); + let outgoing_records = self + .sync_impl + .lock() + .unwrap() + .apply_incoming(inbound.changes, telem)?; + + Ok(OutgoingChangeset::new("tabs".into(), outgoing_records)) + } + + fn sync_finished( + &self, + new_timestamp: ServerTimestamp, + records_synced: Vec<Guid>, + ) -> anyhow::Result<()> { + Ok(self + .sync_impl + .lock() + .unwrap() + .sync_finished(new_timestamp, &records_synced)?) + } + + fn get_collection_requests( + &self, + server_timestamp: ServerTimestamp, + ) -> anyhow::Result<Vec<CollectionRequest>> { + let since = self + .sync_impl + .lock() + .unwrap() + .get_last_sync()? + .unwrap_or_default(); + Ok(if since == server_timestamp { + vec![] + } else { + vec![CollectionRequest::new("tabs".into()) + .full() + .newer_than(since)] + }) + } + + fn get_sync_assoc(&self) -> anyhow::Result<EngineSyncAssociation> { + Ok(self.sync_impl.lock().unwrap().get_sync_assoc()?) + } + + fn reset(&self, assoc: &EngineSyncAssociation) -> anyhow::Result<()> { + Ok(self.sync_impl.lock().unwrap().reset(assoc)?) + } + + fn wipe(&self) -> anyhow::Result<()> { + Ok(self.sync_impl.lock().unwrap().wipe()?) + } +} + +impl crate::TabsStore { + // This allows the embedding app to say "make this instance available to + // the sync manager". The implementation is more like "offer to sync mgr" + // (thereby avoiding us needing to link with the sync manager) but + // `register_with_sync_manager()` is logically what's happening so that's + // the name it gets. 
+ pub fn register_with_sync_manager(self: Arc<Self>) { + let mut state = STORE_FOR_MANAGER.lock().unwrap(); + *state = Arc::downgrade(&self); + } +} + +#[cfg(test)] +pub mod test { + use super::*; + use serde_json::json; + use sync15::bso::IncomingBso; + + #[test] + fn test_incoming_tabs() { + env_logger::try_init().ok(); + + let engine = TabsEngine::new(Arc::new(TabsStore::new_with_mem_path("test-incoming"))); + + let records = vec![ + json!({ + "id": "device-no-tabs", + "clientName": "device with no tabs", + "tabs": [], + }), + json!({ + "id": "device-with-a-tab", + "clientName": "device with a tab", + "tabs": [{ + "title": "the title", + "urlHistory": [ + "https://mozilla.org/" + ], + "icon": "https://mozilla.org/icon", + "lastUsed": 1643764207 + }] + }), + // test an updated payload will replace the previous record + json!({ + "id": "device-with-a-tab", + "clientName": "device with an updated tab", + "tabs": [{ + "title": "the new title", + "urlHistory": [ + "https://mozilla.org/" + ], + "icon": "https://mozilla.org/icon", + "lastUsed": 1643764208 + }] + }), + // This has the main payload as OK but the tabs part invalid. + json!({ + "id": "device-with-invalid-tab", + "clientName": "device with a tab", + "tabs": [{ + "foo": "bar", + }] + }), + // We want this to be a valid payload but an invalid tab - so it needs an ID. + json!({ + "id": "invalid-tab", + "foo": "bar" + }), + ]; + + let incoming = IncomingChangeset::new_with_changes( + engine.collection_name(), + ServerTimestamp(0), + records + .into_iter() + .map(IncomingBso::from_test_content) + .collect(), + ); + let outgoing = engine + .apply_incoming(vec![incoming], &mut telemetry::Engine::new("tabs")) + .expect("Should apply incoming and stage outgoing records"); + + assert!(outgoing.changes.is_empty()); + + // now check the store has what we think it has. + let sync_impl = engine.sync_impl.lock().unwrap(); + let mut storage = sync_impl.store.storage.lock().unwrap(); + let mut crts = storage.get_remote_tabs().expect("should work"); + crts.sort_by(|a, b| a.client_name.partial_cmp(&b.client_name).unwrap()); + assert_eq!(crts.len(), 2, "we currently include devices with no tabs"); + let crt = &crts[0]; + assert_eq!(crt.client_name, "device with an updated tab"); + assert_eq!(crt.device_type, DeviceType::Unknown); + assert_eq!(crt.remote_tabs.len(), 1); + assert_eq!(crt.remote_tabs[0].title, "the new title"); + + let crt = &crts[1]; + assert_eq!(crt.client_name, "device with no tabs"); + assert_eq!(crt.device_type, DeviceType::Unknown); + assert_eq!(crt.remote_tabs.len(), 0); + } + + #[test] + fn test_no_incoming_doesnt_write() { + env_logger::try_init().ok(); + + let engine = TabsEngine::new(Arc::new(TabsStore::new_with_mem_path( + "test_no_incoming_doesnt_write", + ))); + + let records = vec![json!({ + "id": "device-with-a-tab", + "clientName": "device with a tab", + "tabs": [{ + "title": "the title", + "urlHistory": [ + "https://mozilla.org/" + ], + "icon": "https://mozilla.org/icon", + "lastUsed": 1643764207 + }] + })]; + + let incoming = IncomingChangeset::new_with_changes( + engine.collection_name(), + ServerTimestamp(0), + records + .into_iter() + .map(IncomingBso::from_test_content) + .collect(), + ); + engine + .apply_incoming(vec![incoming], &mut telemetry::Engine::new("tabs")) + .expect("Should apply incoming and stage outgoing records"); + + // now check the store has what we think it has. 
+ { + let sync_impl = engine.sync_impl.lock().unwrap(); + let mut storage = sync_impl.store.storage.lock().unwrap(); + assert_eq!(storage.get_remote_tabs().expect("should work").len(), 1); + } + + // Now another sync with zero incoming records, should still be able to get back + // our one client. + let incoming = IncomingChangeset::new_with_changes( + engine.collection_name(), + ServerTimestamp(0), + vec![], + ); + engine + .apply_incoming(vec![incoming], &mut telemetry::Engine::new("tabs")) + .expect("Should succeed applying zero records"); + + { + let sync_impl = engine.sync_impl.lock().unwrap(); + let mut storage = sync_impl.store.storage.lock().unwrap(); + assert_eq!(storage.get_remote_tabs().expect("should work").len(), 1); + } + } + + #[test] + fn test_sync_manager_registration() { + let store = Arc::new(TabsStore::new_with_mem_path("test-registration")); + assert_eq!(Arc::strong_count(&store), 1); + assert_eq!(Arc::weak_count(&store), 0); + Arc::clone(&store).register_with_sync_manager(); + assert_eq!(Arc::strong_count(&store), 1); + assert_eq!(Arc::weak_count(&store), 1); + let registered = STORE_FOR_MANAGER + .lock() + .unwrap() + .upgrade() + .expect("should upgrade"); + assert!(Arc::ptr_eq(&store, ®istered)); + drop(registered); + // should be no new references + assert_eq!(Arc::strong_count(&store), 1); + assert_eq!(Arc::weak_count(&store), 1); + // dropping the registered object should drop the registration. + drop(store); + assert!(STORE_FOR_MANAGER.lock().unwrap().upgrade().is_none()); + } +} diff --git a/third_party/rust/tabs/src/sync/full_sync.rs b/third_party/rust/tabs/src/sync/full_sync.rs new file mode 100644 index 0000000000..768aecce0f --- /dev/null +++ b/third_party/rust/tabs/src/sync/full_sync.rs @@ -0,0 +1,70 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +use crate::{sync::engine::TabsSyncImpl, ApiResult, TabsEngine, TabsStore}; +use error_support::handle_error; +use interrupt_support::NeverInterrupts; +use std::sync::Arc; +use sync15::client::{sync_multiple, MemoryCachedState, Sync15StorageClientInit}; +use sync15::engine::EngineSyncAssociation; +use sync15::KeyBundle; + +impl TabsStore { + #[handle_error(crate::Error)] + pub fn reset(self: Arc<Self>) -> ApiResult<()> { + let mut sync_impl = TabsSyncImpl::new(Arc::clone(&self)); + sync_impl.reset(&EngineSyncAssociation::Disconnected)?; + Ok(()) + } + + /// A convenience wrapper around sync_multiple. + #[handle_error(crate::Error)] + pub fn sync( + self: Arc<Self>, + key_id: String, + access_token: String, + sync_key: String, + tokenserver_url: String, + local_id: String, + ) -> ApiResult<String> { + let mut mem_cached_state = MemoryCachedState::default(); + let engine = TabsEngine::new(Arc::clone(&self)); + + // Since we are syncing without the sync manager, there's no + // command processor, therefore no clients engine, and in + // consequence `TabsStore::prepare_for_sync` is never called + // which means our `local_id` will never be set. + // Do it here. 
+ engine.sync_impl.lock().unwrap().local_id = local_id; + + let storage_init = &Sync15StorageClientInit { + key_id, + access_token, + tokenserver_url: url::Url::parse(tokenserver_url.as_str())?, + }; + let root_sync_key = &KeyBundle::from_ksync_base64(sync_key.as_str())?; + + let mut result = sync_multiple( + &[&engine], + &mut None, + &mut mem_cached_state, + storage_init, + root_sync_key, + &NeverInterrupts, + None, + ); + + // for b/w compat reasons, we do some dances with the result. + // XXX - note that this means telemetry isn't going to be reported back + // to the app - we need to check with lockwise about whether they really + // need these failures to be reported or whether we can loosen this. + if let Err(e) = result.result { + return Err(e.into()); + } + match result.engine_results.remove("tabs") { + None | Some(Ok(())) => Ok(serde_json::to_string(&result.telemetry)?), + Some(Err(e)) => Err(e.into()), + } + } +} diff --git a/third_party/rust/tabs/src/sync/mod.rs b/third_party/rust/tabs/src/sync/mod.rs new file mode 100644 index 0000000000..7d668ff843 --- /dev/null +++ b/third_party/rust/tabs/src/sync/mod.rs @@ -0,0 +1,35 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +pub(crate) mod bridge; +pub(crate) mod engine; +pub(crate) mod record; + +#[cfg(feature = "full-sync")] +pub mod full_sync; + +// When full-sync isn't enabled we need stub versions for these UDL exposed functions. +#[cfg(not(feature = "full-sync"))] +impl crate::TabsStore { + pub fn reset(self: std::sync::Arc<Self>) -> crate::error::ApiResult<()> { + log::warn!("reset: feature not enabled"); + Err(crate::error::TabsApiError::SyncError { + reason: "reset".to_string(), + }) + } + + pub fn sync( + self: std::sync::Arc<Self>, + _key_id: String, + _access_token: String, + _sync_key: String, + _tokenserver_url: String, + _local_id: String, + ) -> crate::error::ApiResult<String> { + log::warn!("sync: feature not enabled"); + Err(crate::error::TabsApiError::SyncError { + reason: "sync".to_string(), + }) + } +} diff --git a/third_party/rust/tabs/src/sync/record.rs b/third_party/rust/tabs/src/sync/record.rs new file mode 100644 index 0000000000..a1da1a33e8 --- /dev/null +++ b/third_party/rust/tabs/src/sync/record.rs @@ -0,0 +1,97 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +use serde_derive::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Hash, PartialEq, Eq, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct TabsRecordTab { + pub title: String, + pub url_history: Vec<String>, + pub icon: Option<String>, + pub last_used: i64, // Seconds since epoch! +} + +#[derive(Debug, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +// This struct mirrors what is stored on the server +pub struct TabsRecord { + // `String` instead of `SyncGuid` because some IDs are FxA device ID (XXX - that doesn't + // matter though - this could easily be a Guid!) 
+ pub id: String, + pub client_name: String, + pub tabs: Vec<TabsRecordTab>, +} + +#[cfg(test)] +pub mod test { + use super::*; + use serde_json::json; + + #[test] + fn test_payload() { + let payload = json!({ + "id": "JkeBPC50ZI0m", + "clientName": "client name", + "tabs": [{ + "title": "the title", + "urlHistory": [ + "https://mozilla.org/" + ], + "icon": "https://mozilla.org/icon", + "lastUsed": 1643764207 + }] + }); + let record: TabsRecord = serde_json::from_value(payload).expect("should work"); + assert_eq!(record.id, "JkeBPC50ZI0m"); + assert_eq!(record.client_name, "client name"); + assert_eq!(record.tabs.len(), 1); + let tab = &record.tabs[0]; + assert_eq!(tab.title, "the title"); + assert_eq!(tab.icon, Some("https://mozilla.org/icon".to_string())); + assert_eq!(tab.last_used, 1643764207); + } + + #[test] + fn test_roundtrip() { + let tab = TabsRecord { + id: "JkeBPC50ZI0m".into(), + client_name: "client name".into(), + tabs: vec![TabsRecordTab { + title: "the title".into(), + url_history: vec!["https://mozilla.org/".into()], + icon: Some("https://mozilla.org/icon".into()), + last_used: 1643764207, + }], + }; + let round_tripped = + serde_json::from_value(serde_json::to_value(tab.clone()).unwrap()).unwrap(); + assert_eq!(tab, round_tripped); + } + + #[test] + fn test_extra_fields() { + let payload = json!({ + "id": "JkeBPC50ZI0m", + // Let's say we agree on new tabs to record, we want old versions to + // ignore them! + "ignoredField": "??", + "clientName": "client name", + "tabs": [{ + "title": "the title", + "urlHistory": [ + "https://mozilla.org/" + ], + "icon": "https://mozilla.org/icon", + "lastUsed": 1643764207, + // Ditto - make sure we ignore unexpected fields in each tab. + "ignoredField": "??", + }] + }); + let record: TabsRecord = serde_json::from_value(payload).unwrap(); + // The point of this test is really just to ensure the deser worked, so + // just check the ID. + assert_eq!(record.id, "JkeBPC50ZI0m"); + } +} |
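
The bridged engine above exchanges records with Desktop over UniFFI as JSON strings: each incoming record is an envelope carrying `id`, `modified`, and a stringified `payload`, which is exactly the shape exercised by the `test_sync_via_bridge` test in bridge.rs. The following is a minimal standalone sketch (assuming only the `serde_json` crate; the `bridge` calls in the trailing comments are illustrative, not a verified API surface) of building one such envelope:

```rust
// Standalone sketch: build the envelope JSON that TabsBridgedEngine::store_incoming()
// receives from UniFFI, mirroring the construction used in the bridge.rs test above.
use serde_json::json;

fn main() {
    // A tabs record as it appears on the server (see TabsRecord in record.rs).
    let record = json!({
        "id": "device-with-a-tab",
        "clientName": "device with a tab",
        "tabs": [{
            "title": "the title",
            "urlHistory": ["https://mozilla.org/"],
            "icon": "https://mozilla.org/icon",
            "lastUsed": 1643764207 // seconds since epoch on the wire
        }]
    });

    // The envelope's payload is itself a JSON string, not a nested object.
    let envelope = json!({
        "id": record.get("id"),
        "modified": 0,
        "payload": serde_json::to_string(&record).unwrap(),
    });

    // store_incoming() takes a Vec<String>, one JSON-encoded envelope per record.
    let incoming = vec![serde_json::to_string(&envelope).unwrap()];
    println!("{}", incoming[0]);

    // An embedding app would then drive the engine roughly as the test does
    // (illustrative only):
    //   bridge.store_incoming(incoming)?;
    //   let outgoing = bridge.apply()?;              // JSON-encoded OutgoingBso's
    //   bridge.set_uploaded(server_millis, vec![])?; // advances last_sync
}
```

Note the timestamp units: `lastUsed` is seconds on the wire, while local storage uses milliseconds; `RemoteTab::from_record_tab()` multiplies by 1000 on the way in and `to_record_tab()` divides by 1000 on the way out, as shown in engine.rs above.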