diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-19 00:47:55 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-19 00:47:55 +0000 |
commit | 26a029d407be480d791972afb5975cf62c9360a6 (patch) | |
tree | f435a8308119effd964b339f76abb83a57c29483 /toolkit/components/extensions/storage/webext_storage_bridge/src | |
parent | Initial commit. (diff) | |
download | firefox-26a029d407be480d791972afb5975cf62c9360a6.tar.xz firefox-26a029d407be480d791972afb5975cf62c9360a6.zip |
Adding upstream version 124.0.1. (tags: upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'toolkit/components/extensions/storage/webext_storage_bridge/src')
5 files changed, 1130 insertions, 0 deletions
diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/src/area.rs b/toolkit/components/extensions/storage/webext_storage_bridge/src/area.rs new file mode 100644 index 0000000000..1418ccca29 --- /dev/null +++ b/toolkit/components/extensions/storage/webext_storage_bridge/src/area.rs @@ -0,0 +1,484 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +use std::{ + cell::{Ref, RefCell}, + convert::TryInto, + ffi::OsString, + mem, + path::PathBuf, + str, + sync::Arc, +}; + +use golden_gate::{ApplyTask, BridgedEngine, FerryTask}; +use moz_task::{self, DispatchOptions, TaskRunnable}; +use nserror::{nsresult, NS_OK}; +use nsstring::{nsACString, nsCString, nsString}; +use thin_vec::ThinVec; +use webext_storage::STORAGE_VERSION; +use xpcom::{ + interfaces::{ + mozIBridgedSyncEngineApplyCallback, mozIBridgedSyncEngineCallback, + mozIExtensionStorageCallback, mozIServicesLogSink, nsIFile, nsISerialEventTarget, + }, + RefPtr, +}; + +use crate::error::{Error, Result}; +use crate::punt::{Punt, PuntTask, TeardownTask}; +use crate::store::{LazyStore, LazyStoreConfig}; + +fn path_from_nsifile(file: &nsIFile) -> Result<PathBuf> { + let mut raw_path = nsString::new(); + // `nsIFile::GetPath` gives us a UTF-16-encoded version of its + // native path, which we must turn back into a platform-native + // string. We can't use `nsIFile::nativePath()` here because + // it's marked as `nostdcall`, which Rust doesn't support. + unsafe { file.GetPath(&mut *raw_path) }.to_result()?; + let native_path = { + // On Windows, we can create a native string directly from the + // encoded path. + #[cfg(windows)] + { + use std::os::windows::prelude::*; + OsString::from_wide(&raw_path) + } + // On other platforms, we must first decode the raw path from + // UTF-16, and then create our native string. 
+ #[cfg(not(windows))] + OsString::from(String::from_utf16(&raw_path)?) + }; + Ok(native_path.into()) +} + +/// An XPCOM component class for the Rust extension storage API. This class +/// implements the interfaces needed for syncing and storage. +/// +/// This class can be created on any thread, but must not be shared between +/// threads. In Rust terms, it's `Send`, but not `Sync`. +#[xpcom( + implement( + mozIExtensionStorageArea, + mozIConfigurableExtensionStorageArea, + mozISyncedExtensionStorageArea, + mozIInterruptible, + mozIBridgedSyncEngine + ), + nonatomic +)] +pub struct StorageSyncArea { + /// A background task queue, used to run all our storage operations on a + /// thread pool. Using a serial event target here means that all operations + /// will execute sequentially. + queue: RefPtr<nsISerialEventTarget>, + /// The store is lazily initialized on the task queue the first time it's + /// used. + store: RefCell<Option<Arc<LazyStore>>>, +} + +/// `mozIExtensionStorageArea` implementation. +impl StorageSyncArea { + /// Creates a storage area and its task queue. + pub fn new() -> Result<RefPtr<StorageSyncArea>> { + let queue = moz_task::create_background_task_queue(cstr!("StorageSyncArea"))?; + Ok(StorageSyncArea::allocate(InitStorageSyncArea { + queue, + store: RefCell::new(Some(Arc::default())), + })) + } + + /// Returns the store for this area, or an error if it's been torn down. + fn store(&self) -> Result<Ref<'_, Arc<LazyStore>>> { + let maybe_store = self.store.borrow(); + if maybe_store.is_some() { + Ok(Ref::map(maybe_store, |s| s.as_ref().unwrap())) + } else { + Err(Error::AlreadyTornDown) + } + } + + /// Dispatches a task for a storage operation to the task queue. 
+ fn dispatch(&self, punt: Punt, callback: &mozIExtensionStorageCallback) -> Result<()> { + let name = punt.name(); + let task = PuntTask::new(Arc::downgrade(&*self.store()?), punt, callback)?; + let runnable = TaskRunnable::new(name, Box::new(task))?; + // `may_block` schedules the runnable on a dedicated I/O pool. + TaskRunnable::dispatch_with_options( + runnable, + self.queue.coerce(), + DispatchOptions::new().may_block(true), + )?; + Ok(()) + } + + xpcom_method!( + configure => Configure( + database_file: *const nsIFile, + kinto_file: *const nsIFile + ) + ); + /// Sets up the storage area. + fn configure(&self, database_file: &nsIFile, kinto_file: &nsIFile) -> Result<()> { + self.store()?.configure(LazyStoreConfig { + path: path_from_nsifile(database_file)?, + kinto_path: path_from_nsifile(kinto_file)?, + })?; + Ok(()) + } + + xpcom_method!( + set => Set( + ext_id: *const ::nsstring::nsACString, + json: *const ::nsstring::nsACString, + callback: *const mozIExtensionStorageCallback + ) + ); + /// Sets one or more key-value pairs. + fn set( + &self, + ext_id: &nsACString, + json: &nsACString, + callback: &mozIExtensionStorageCallback, + ) -> Result<()> { + self.dispatch( + Punt::Set { + ext_id: str::from_utf8(ext_id)?.into(), + value: serde_json::from_str(str::from_utf8(json)?)?, + }, + callback, + )?; + Ok(()) + } + + xpcom_method!( + get => Get( + ext_id: *const ::nsstring::nsACString, + json: *const ::nsstring::nsACString, + callback: *const mozIExtensionStorageCallback + ) + ); + /// Gets values for one or more keys. 
+ fn get( + &self, + ext_id: &nsACString, + json: &nsACString, + callback: &mozIExtensionStorageCallback, + ) -> Result<()> { + self.dispatch( + Punt::Get { + ext_id: str::from_utf8(ext_id)?.into(), + keys: serde_json::from_str(str::from_utf8(json)?)?, + }, + callback, + ) + } + + xpcom_method!( + remove => Remove( + ext_id: *const ::nsstring::nsACString, + json: *const ::nsstring::nsACString, + callback: *const mozIExtensionStorageCallback + ) + ); + /// Removes one or more keys and their values. + fn remove( + &self, + ext_id: &nsACString, + json: &nsACString, + callback: &mozIExtensionStorageCallback, + ) -> Result<()> { + self.dispatch( + Punt::Remove { + ext_id: str::from_utf8(ext_id)?.into(), + keys: serde_json::from_str(str::from_utf8(json)?)?, + }, + callback, + ) + } + + xpcom_method!( + clear => Clear( + ext_id: *const ::nsstring::nsACString, + callback: *const mozIExtensionStorageCallback + ) + ); + /// Removes all keys and values for the specified extension. + fn clear(&self, ext_id: &nsACString, callback: &mozIExtensionStorageCallback) -> Result<()> { + self.dispatch( + Punt::Clear { + ext_id: str::from_utf8(ext_id)?.into(), + }, + callback, + ) + } + + xpcom_method!( + getBytesInUse => GetBytesInUse( + ext_id: *const ::nsstring::nsACString, + keys: *const ::nsstring::nsACString, + callback: *const mozIExtensionStorageCallback + ) + ); + /// Obtains the count of bytes in use for the specified key or for all keys. + fn getBytesInUse( + &self, + ext_id: &nsACString, + keys: &nsACString, + callback: &mozIExtensionStorageCallback, + ) -> Result<()> { + self.dispatch( + Punt::GetBytesInUse { + ext_id: str::from_utf8(ext_id)?.into(), + keys: serde_json::from_str(str::from_utf8(keys)?)?, + }, + callback, + ) + } + + xpcom_method!(teardown => Teardown(callback: *const mozIExtensionStorageCallback)); + /// Tears down the storage area, closing the backing database connection. 
+ fn teardown(&self, callback: &mozIExtensionStorageCallback) -> Result<()> { + // Each storage task holds a `Weak` reference to the store, which it + // upgrades to an `Arc` (strong reference) when the task runs on the + // background queue. The strong reference is dropped when the task + // finishes. When we tear down the storage area, we relinquish our one + // owned strong reference to the `TeardownTask`. Because we're using a + // task queue, when the `TeardownTask` runs, it should have the only + // strong reference to the store, since all other tasks that called + // `Weak::upgrade` will have already finished. The `TeardownTask` can + // then consume the `Arc` and destroy the store. + let mut maybe_store = self.store.borrow_mut(); + match mem::take(&mut *maybe_store) { + Some(store) => { + // Interrupt any currently-running statements. + store.interrupt(); + // If dispatching the runnable fails, we'll leak the store + // without closing its database connection. + teardown(&self.queue, store, callback)?; + } + None => return Err(Error::AlreadyTornDown), + } + Ok(()) + } + + xpcom_method!(takeMigrationInfo => TakeMigrationInfo(callback: *const mozIExtensionStorageCallback)); + + /// Fetch-and-delete (e.g. `take`) information about the migration from the + /// kinto-based extension-storage to the rust-based storage. + fn takeMigrationInfo(&self, callback: &mozIExtensionStorageCallback) -> Result<()> { + self.dispatch(Punt::TakeMigrationInfo, callback) + } +} + +fn teardown( + queue: &nsISerialEventTarget, + store: Arc<LazyStore>, + callback: &mozIExtensionStorageCallback, +) -> Result<()> { + let task = TeardownTask::new(store, callback)?; + let runnable = TaskRunnable::new(TeardownTask::name(), Box::new(task))?; + TaskRunnable::dispatch_with_options( + runnable, + queue.coerce(), + DispatchOptions::new().may_block(true), + )?; + Ok(()) +} + +/// `mozISyncedExtensionStorageArea` implementation. 
+impl StorageSyncArea { + xpcom_method!( + fetch_pending_sync_changes => FetchPendingSyncChanges(callback: *const mozIExtensionStorageCallback) + ); + fn fetch_pending_sync_changes(&self, callback: &mozIExtensionStorageCallback) -> Result<()> { + self.dispatch(Punt::FetchPendingSyncChanges, callback) + } +} + +/// `mozIInterruptible` implementation. +impl StorageSyncArea { + xpcom_method!( + interrupt => Interrupt() + ); + /// Interrupts any operations currently running on the background task + /// queue. + fn interrupt(&self) -> Result<()> { + self.store()?.interrupt(); + Ok(()) + } +} + +/// `mozIBridgedSyncEngine` implementation. +impl StorageSyncArea { + xpcom_method!(get_logger => GetLogger() -> *const mozIServicesLogSink); + fn get_logger(&self) -> Result<RefPtr<mozIServicesLogSink>> { + Err(NS_OK)? + } + + xpcom_method!(set_logger => SetLogger(logger: *const mozIServicesLogSink)); + fn set_logger(&self, _logger: Option<&mozIServicesLogSink>) -> Result<()> { + Ok(()) + } + + xpcom_method!(get_storage_version => GetStorageVersion() -> i32); + fn get_storage_version(&self) -> Result<i32> { + Ok(STORAGE_VERSION.try_into().unwrap()) + } + + // It's possible that migration, or even merging, will result in records + // too large for the server. We tolerate that (and hope that the addons do + // too :) + xpcom_method!(get_allow_skipped_record => GetAllowSkippedRecord() -> bool); + fn get_allow_skipped_record(&self) -> Result<bool> { + Ok(true) + } + + xpcom_method!( + get_last_sync => GetLastSync( + callback: *const mozIBridgedSyncEngineCallback + ) + ); + fn get_last_sync(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> { + Ok(FerryTask::for_last_sync(self.new_bridge()?, callback)?.dispatch(&self.queue)?) 
+ } + + xpcom_method!( + set_last_sync => SetLastSync( + last_sync_millis: i64, + callback: *const mozIBridgedSyncEngineCallback + ) + ); + fn set_last_sync( + &self, + last_sync_millis: i64, + callback: &mozIBridgedSyncEngineCallback, + ) -> Result<()> { + Ok( + FerryTask::for_set_last_sync(self.new_bridge()?, last_sync_millis, callback)? + .dispatch(&self.queue)?, + ) + } + + xpcom_method!( + get_sync_id => GetSyncId( + callback: *const mozIBridgedSyncEngineCallback + ) + ); + fn get_sync_id(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> { + Ok(FerryTask::for_sync_id(self.new_bridge()?, callback)?.dispatch(&self.queue)?) + } + + xpcom_method!( + reset_sync_id => ResetSyncId( + callback: *const mozIBridgedSyncEngineCallback + ) + ); + fn reset_sync_id(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> { + Ok(FerryTask::for_reset_sync_id(self.new_bridge()?, callback)?.dispatch(&self.queue)?) + } + + xpcom_method!( + ensure_current_sync_id => EnsureCurrentSyncId( + new_sync_id: *const nsACString, + callback: *const mozIBridgedSyncEngineCallback + ) + ); + fn ensure_current_sync_id( + &self, + new_sync_id: &nsACString, + callback: &mozIBridgedSyncEngineCallback, + ) -> Result<()> { + Ok( + FerryTask::for_ensure_current_sync_id(self.new_bridge()?, new_sync_id, callback)? + .dispatch(&self.queue)?, + ) + } + + xpcom_method!( + sync_started => SyncStarted( + callback: *const mozIBridgedSyncEngineCallback + ) + ); + fn sync_started(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> { + Ok(FerryTask::for_sync_started(self.new_bridge()?, callback)?.dispatch(&self.queue)?) 
+ } + + xpcom_method!( + store_incoming => StoreIncoming( + incoming_envelopes_json: *const ThinVec<::nsstring::nsCString>, + callback: *const mozIBridgedSyncEngineCallback + ) + ); + fn store_incoming( + &self, + incoming_envelopes_json: Option<&ThinVec<nsCString>>, + callback: &mozIBridgedSyncEngineCallback, + ) -> Result<()> { + Ok(FerryTask::for_store_incoming( + self.new_bridge()?, + incoming_envelopes_json.map(|v| v.as_slice()).unwrap_or(&[]), + callback, + )? + .dispatch(&self.queue)?) + } + + xpcom_method!(apply => Apply(callback: *const mozIBridgedSyncEngineApplyCallback)); + fn apply(&self, callback: &mozIBridgedSyncEngineApplyCallback) -> Result<()> { + Ok(ApplyTask::new(self.new_bridge()?, callback)?.dispatch(&self.queue)?) + } + + xpcom_method!( + set_uploaded => SetUploaded( + server_modified_millis: i64, + uploaded_ids: *const ThinVec<::nsstring::nsCString>, + callback: *const mozIBridgedSyncEngineCallback + ) + ); + fn set_uploaded( + &self, + server_modified_millis: i64, + uploaded_ids: Option<&ThinVec<nsCString>>, + callback: &mozIBridgedSyncEngineCallback, + ) -> Result<()> { + Ok(FerryTask::for_set_uploaded( + self.new_bridge()?, + server_modified_millis, + uploaded_ids.map(|v| v.as_slice()).unwrap_or(&[]), + callback, + )? + .dispatch(&self.queue)?) + } + + xpcom_method!( + sync_finished => SyncFinished( + callback: *const mozIBridgedSyncEngineCallback + ) + ); + fn sync_finished(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> { + Ok(FerryTask::for_sync_finished(self.new_bridge()?, callback)?.dispatch(&self.queue)?) + } + + xpcom_method!( + reset => Reset( + callback: *const mozIBridgedSyncEngineCallback + ) + ); + fn reset(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> { + Ok(FerryTask::for_reset(self.new_bridge()?, callback)?.dispatch(&self.queue)?) 
+ } + + xpcom_method!( + wipe => Wipe( + callback: *const mozIBridgedSyncEngineCallback + ) + ); + fn wipe(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> { + Ok(FerryTask::for_wipe(self.new_bridge()?, callback)?.dispatch(&self.queue)?) + } + + fn new_bridge(&self) -> Result<Box<dyn BridgedEngine>> { + Ok(Box::new(self.store()?.get()?.bridged_engine())) + } +} diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/src/error.rs b/toolkit/components/extensions/storage/webext_storage_bridge/src/error.rs new file mode 100644 index 0000000000..877b2b21a8 --- /dev/null +++ b/toolkit/components/extensions/storage/webext_storage_bridge/src/error.rs @@ -0,0 +1,124 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +use std::{error, fmt, result, str::Utf8Error, string::FromUtf16Error}; + +use golden_gate::Error as GoldenGateError; +use nserror::{ + nsresult, NS_ERROR_ALREADY_INITIALIZED, NS_ERROR_CANNOT_CONVERT_DATA, + NS_ERROR_DOM_QUOTA_EXCEEDED_ERR, NS_ERROR_FAILURE, NS_ERROR_INVALID_ARG, + NS_ERROR_NOT_IMPLEMENTED, NS_ERROR_NOT_INITIALIZED, NS_ERROR_UNEXPECTED, +}; +use serde_json::error::Error as JsonError; +use webext_storage::error::Error as WebextStorageError; + +/// A specialized `Result` type for extension storage operations. +pub type Result<T> = result::Result<T, Error>; + +/// The error type for extension storage operations. Errors can be converted +/// into `nsresult` codes, and include more detailed messages that can be passed +/// to callbacks. 
+#[derive(Debug)] +pub enum Error { + Nsresult(nsresult), + WebextStorage(WebextStorageError), + MigrationFailed(WebextStorageError), + GoldenGate(GoldenGateError), + MalformedString(Box<dyn error::Error + Send + Sync + 'static>), + AlreadyConfigured, + NotConfigured, + AlreadyRan(&'static str), + DidNotRun(&'static str), + AlreadyTornDown, + NotImplemented, +} + +impl error::Error for Error { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + match self { + Error::MalformedString(error) => Some(error.as_ref()), + _ => None, + } + } +} + +impl From<nsresult> for Error { + fn from(result: nsresult) -> Error { + Error::Nsresult(result) + } +} + +impl From<WebextStorageError> for Error { + fn from(error: WebextStorageError) -> Error { + Error::WebextStorage(error) + } +} + +impl From<GoldenGateError> for Error { + fn from(error: GoldenGateError) -> Error { + Error::GoldenGate(error) + } +} + +impl From<Utf8Error> for Error { + fn from(error: Utf8Error) -> Error { + Error::MalformedString(error.into()) + } +} + +impl From<FromUtf16Error> for Error { + fn from(error: FromUtf16Error) -> Error { + Error::MalformedString(error.into()) + } +} + +impl From<JsonError> for Error { + fn from(error: JsonError) -> Error { + Error::MalformedString(error.into()) + } +} + +impl From<Error> for nsresult { + fn from(error: Error) -> nsresult { + match error { + Error::Nsresult(result) => result, + Error::WebextStorage(e) => match e { + WebextStorageError::QuotaError(_) => NS_ERROR_DOM_QUOTA_EXCEEDED_ERR, + _ => NS_ERROR_FAILURE, + }, + Error::MigrationFailed(_) => NS_ERROR_CANNOT_CONVERT_DATA, + Error::GoldenGate(error) => error.into(), + Error::MalformedString(_) => NS_ERROR_INVALID_ARG, + Error::AlreadyConfigured => NS_ERROR_ALREADY_INITIALIZED, + Error::NotConfigured => NS_ERROR_NOT_INITIALIZED, + Error::AlreadyRan(_) => NS_ERROR_UNEXPECTED, + Error::DidNotRun(_) => NS_ERROR_UNEXPECTED, + Error::AlreadyTornDown => NS_ERROR_UNEXPECTED, + Error::NotImplemented => 
NS_ERROR_NOT_IMPLEMENTED, + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Error::Nsresult(result) => write!(f, "Operation failed with {result}"), + Error::WebextStorage(error) => error.fmt(f), + Error::MigrationFailed(error) => write!(f, "Migration failed with {error}"), + Error::GoldenGate(error) => error.fmt(f), + Error::MalformedString(error) => error.fmt(f), + Error::AlreadyConfigured => write!(f, "The storage area is already configured"), + Error::NotConfigured => write!( + f, + "The storage area must be configured by calling `configure` first" + ), + Error::AlreadyRan(what) => write!(f, "`{what}` already ran on the background thread"), + Error::DidNotRun(what) => write!(f, "`{what}` didn't run on the background thread"), + Error::AlreadyTornDown => { + write!(f, "Can't use a storage area that's already torn down") + } + Error::NotImplemented => write!(f, "Operation not implemented"), + } + } +} diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/src/lib.rs b/toolkit/components/extensions/storage/webext_storage_bridge/src/lib.rs new file mode 100644 index 0000000000..94133ef1e9 --- /dev/null +++ b/toolkit/components/extensions/storage/webext_storage_bridge/src/lib.rs @@ -0,0 +1,65 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#![allow(non_snake_case)] + +//! This crate bridges the WebExtension storage area interfaces in Firefox +//! Desktop to the extension storage Rust component in Application Services. +//! +//! ## How are the WebExtension storage APIs implemented in Firefox? +//! +//! There are three storage APIs available for WebExtensions: +//! `storage.local`, which is stored locally in an IndexedDB database and never +//! 
synced to other devices, `storage.sync`, which is stored in a local SQLite +//! database and synced to all devices signed in to the same Firefox Account, +//! and `storage.managed`, which is provisioned in a native manifest and +//! read-only. +//! +//! * `storage.local` is implemented in `ExtensionStorageIDB.jsm`. +//! * `storage.sync` is implemented in a Rust component, `webext_storage`. This +//! Rust component is vendored in m-c, and exposed to JavaScript via an XPCOM +//! API in `webext_storage_bridge` (this crate). Eventually, we'll change +//! `ExtensionStorageSync.jsm` to call the XPCOM API instead of using the +//! old Kinto storage adapter. +//! * `storage.managed` is implemented directly in `parent/ext-storage.js`. +//! +//! `webext_storage_bridge` implements the `mozIExtensionStorageArea` +//! (and, eventually, `mozIBridgedSyncEngine`) interface for `storage.sync`. The +//! implementation is in `area::StorageSyncArea`, and is backed by the +//! `webext_storage` component. + +#[macro_use] +extern crate cstr; +#[macro_use] +extern crate xpcom; + +mod area; +mod error; +mod punt; +mod store; + +use nserror::{nsresult, NS_OK}; +use xpcom::{interfaces::mozIExtensionStorageArea, RefPtr}; + +use crate::area::StorageSyncArea; + +/// The constructor for a `storage.sync` area. This uses C linkage so that it +/// can be called from C++. See `ExtensionStorageComponents.h` for the C++ +/// constructor that's passed to the component manager. +/// +/// # Safety +/// +/// This function is unsafe because it dereferences `result`. 
+#[no_mangle] +pub unsafe extern "C" fn NS_NewExtensionStorageSyncArea( + result: *mut *const mozIExtensionStorageArea, +) -> nsresult { + match StorageSyncArea::new() { + Ok(bridge) => { + RefPtr::new(bridge.coerce::<mozIExtensionStorageArea>()).forget(&mut *result); + NS_OK + } + Err(err) => err.into(), + } +} diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs b/toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs new file mode 100644 index 0000000000..4740237942 --- /dev/null +++ b/toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs @@ -0,0 +1,321 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +use std::{ + borrow::Borrow, + fmt::Write, + mem, result, str, + sync::{Arc, Weak}, +}; + +use atomic_refcell::AtomicRefCell; +use moz_task::{Task, ThreadPtrHandle, ThreadPtrHolder}; +use nserror::nsresult; +use nsstring::nsCString; +use serde::Serialize; +use serde_json::Value as JsonValue; +use storage_variant::VariantType; +use xpcom::{ + interfaces::{mozIExtensionStorageCallback, mozIExtensionStorageListener}, + RefPtr, XpCom, +}; + +use crate::error::{Error, Result}; +use crate::store::LazyStore; + +/// A storage operation that's punted from the main thread to the background +/// task queue. +pub enum Punt { + /// Get the values of the keys for an extension. + Get { ext_id: String, keys: JsonValue }, + /// Set a key-value pair for an extension. + Set { ext_id: String, value: JsonValue }, + /// Remove one or more keys for an extension. + Remove { ext_id: String, keys: JsonValue }, + /// Clear all keys and values for an extension. + Clear { ext_id: String }, + /// Returns the bytes in use for the specified, or all, keys. 
+ GetBytesInUse { ext_id: String, keys: JsonValue }, + /// Fetches all pending Sync change notifications to pass to + /// `storage.onChanged` listeners. + FetchPendingSyncChanges, + /// Fetch-and-delete (e.g. `take`) information about the migration from the + /// kinto-based extension-storage to the rust-based storage. + /// + /// This data is stored in the database instead of just being returned by + /// the call to `migrate`, as we may migrate prior to telemetry being ready. + TakeMigrationInfo, +} + +impl Punt { + /// Returns the operation name, used to label the task runnable and report + /// errors. + pub fn name(&self) -> &'static str { + match self { + Punt::Get { .. } => "webext_storage::get", + Punt::Set { .. } => "webext_storage::set", + Punt::Remove { .. } => "webext_storage::remove", + Punt::Clear { .. } => "webext_storage::clear", + Punt::GetBytesInUse { .. } => "webext_storage::get_bytes_in_use", + Punt::FetchPendingSyncChanges => "webext_storage::fetch_pending_sync_changes", + Punt::TakeMigrationInfo => "webext_storage::take_migration_info", + } + } +} + +/// A storage operation result, punted from the background queue back to the +/// main thread. +#[derive(Default)] +struct PuntResult { + changes: Vec<Change>, + value: Option<String>, +} + +/// A change record for an extension. +struct Change { + ext_id: String, + json: String, +} + +impl PuntResult { + /// Creates a result with a single change to pass to `onChanged`, and no + /// return value for `handleSuccess`. The `Borrow` bound lets this method + /// take either a borrowed reference or an owned value. + fn with_change<T: Borrow<S>, S: Serialize>(ext_id: &str, changes: T) -> Result<Self> { + Ok(PuntResult { + changes: vec![Change { + ext_id: ext_id.into(), + json: serde_json::to_string(changes.borrow())?, + }], + value: None, + }) + } + + /// Creates a result with changes for multiple extensions to pass to + /// `onChanged`, and no return value for `handleSuccess`. 
+ fn with_changes(changes: Vec<Change>) -> Self { + PuntResult { + changes, + value: None, + } + } + + /// Creates a result with no changes to pass to `onChanged`, and a return + /// value for `handleSuccess`. + fn with_value<T: Borrow<S>, S: Serialize>(value: T) -> Result<Self> { + Ok(PuntResult { + changes: Vec::new(), + value: Some(serde_json::to_string(value.borrow())?), + }) + } +} + +/// A generic task used for all storage operations. Punts the operation to the +/// background task queue, receives a result back on the main thread, and calls +/// the callback with it. +pub struct PuntTask { + name: &'static str, + /// Storage tasks hold weak references to the store, which they upgrade + /// to strong references when running on the background queue. This + /// ensures that pending storage tasks don't block teardown (for example, + /// if a consumer calls `get` and then `teardown`, without waiting for + /// `get` to finish). + store: Weak<LazyStore>, + punt: AtomicRefCell<Option<Punt>>, + callback: ThreadPtrHandle<mozIExtensionStorageCallback>, + result: AtomicRefCell<Result<PuntResult>>, +} + +impl PuntTask { + /// Creates a storage task that punts an operation to the background queue. + /// Returns an error if the task couldn't be created because the thread + /// manager is shutting down. + pub fn new( + store: Weak<LazyStore>, + punt: Punt, + callback: &mozIExtensionStorageCallback, + ) -> Result<Self> { + let name = punt.name(); + Ok(Self { + name, + store, + punt: AtomicRefCell::new(Some(punt)), + callback: ThreadPtrHolder::new( + cstr!("mozIExtensionStorageCallback"), + RefPtr::new(callback), + )?, + result: AtomicRefCell::new(Err(Error::DidNotRun(name))), + }) + } + + /// Upgrades the task's weak `LazyStore` reference to a strong one. Returns + /// an error if the store has been torn down. + /// + /// It's important that this is called on the background queue, after the + /// task has been dispatched. 
Storage tasks shouldn't hold strong references + /// to the store on the main thread, because then they might block teardown. + fn store(&self) -> Result<Arc<LazyStore>> { + match self.store.upgrade() { + Some(store) => Ok(store), + None => Err(Error::AlreadyTornDown), + } + } + + /// Runs this task's storage operation on the background queue. + fn inner_run(&self, punt: Punt) -> Result<PuntResult> { + Ok(match punt { + Punt::Set { ext_id, value } => { + PuntResult::with_change(&ext_id, self.store()?.get()?.set(&ext_id, value)?)? + } + Punt::Get { ext_id, keys } => { + PuntResult::with_value(self.store()?.get()?.get(&ext_id, keys)?)? + } + Punt::Remove { ext_id, keys } => { + PuntResult::with_change(&ext_id, self.store()?.get()?.remove(&ext_id, keys)?)? + } + Punt::Clear { ext_id } => { + PuntResult::with_change(&ext_id, self.store()?.get()?.clear(&ext_id)?)? + } + Punt::GetBytesInUse { ext_id, keys } => { + PuntResult::with_value(self.store()?.get()?.get_bytes_in_use(&ext_id, keys)?)? + } + Punt::FetchPendingSyncChanges => PuntResult::with_changes( + self.store()? + .get()? + .get_synced_changes()? + .into_iter() + .map(|info| Change { + ext_id: info.ext_id, + json: info.changes, + }) + .collect(), + ), + Punt::TakeMigrationInfo => { + PuntResult::with_value(self.store()?.get()?.take_migration_info()?)? + } + }) + } +} + +impl Task for PuntTask { + fn run(&self) { + *self.result.borrow_mut() = match self.punt.borrow_mut().take() { + Some(punt) => self.inner_run(punt), + // A task should never run on the background queue twice, but we + // return an error just in case. + None => Err(Error::AlreadyRan(self.name)), + }; + } + + fn done(&self) -> result::Result<(), nsresult> { + let callback = self.callback.get().unwrap(); + // As above, `done` should never be called multiple times, but we handle + // that by returning an error. 
+ match mem::replace( + &mut *self.result.borrow_mut(), + Err(Error::AlreadyRan(self.name)), + ) { + Ok(PuntResult { changes, value }) => { + // If we have change data, and the callback implements the + // listener interface, notify about it first. + if let Some(listener) = callback.query_interface::<mozIExtensionStorageListener>() { + for Change { ext_id, json } in changes { + // Ignore errors. + let _ = unsafe { + listener.OnChanged(&*nsCString::from(ext_id), &*nsCString::from(json)) + }; + } + } + let result = value.map(nsCString::from).into_variant(); + unsafe { callback.HandleSuccess(result.coerce()) } + } + Err(err) => { + let mut message = nsCString::new(); + write!(message, "{err}").unwrap(); + unsafe { callback.HandleError(err.into(), &*message) } + } + } + .to_result() + } +} + +/// A task to tear down the store on the background task queue. +pub struct TeardownTask { + /// Unlike storage tasks, the teardown task holds a strong reference to + /// the store, which it drops on the background queue. This is the only + /// task that should do that. + store: AtomicRefCell<Option<Arc<LazyStore>>>, + callback: ThreadPtrHandle<mozIExtensionStorageCallback>, + result: AtomicRefCell<Result<()>>, +} + +impl TeardownTask { + /// Creates a teardown task. This should only be created and dispatched + /// once, to clean up the store at shutdown. Returns an error if the task + /// couldn't be created because the thread manager is shutting down. + pub fn new(store: Arc<LazyStore>, callback: &mozIExtensionStorageCallback) -> Result<Self> { + Ok(Self { + store: AtomicRefCell::new(Some(store)), + callback: ThreadPtrHolder::new( + cstr!("mozIExtensionStorageCallback"), + RefPtr::new(callback), + )?, + result: AtomicRefCell::new(Err(Error::DidNotRun(Self::name()))), + }) + } + + /// Returns the task name, used to label its runnable and report errors. 
+    pub fn name() -> &'static str {
+        "webext_storage::teardown"
+    }
+
+    /// Tears down and drops the store on the background queue.
+    fn inner_run(&self, store: Arc<LazyStore>) -> Result<()> {
+        // At this point, we should be holding the only strong reference
+        // to the store, since 1) `StorageSyncArea` gave its one strong
+        // reference to our task, and 2) we're running on a background
+        // task queue, which runs all tasks sequentially...so no other
+        // `PuntTask`s should be running and trying to upgrade their
+        // weak references. So we can unwrap the `Arc` and take ownership
+        // of the store.
+        match Arc::try_unwrap(store) {
+            Ok(store) => store.teardown(),
+            Err(_) => {
+                // If unwrapping the `Arc` fails, someone else must have
+                // a strong reference to the store. We could sleep and
+                // try again, but this is so unexpected that it's easier
+                // to just leak the store, and return an error to the
+                // callback. Except in tests, we only call `teardown` at
+                // shutdown, so the resources will get reclaimed soon,
+                // anyway.
+                Err(Error::DidNotRun(Self::name()))
+            }
+        }
+    }
+}
+
+impl Task for TeardownTask {
+    fn run(&self) {
+        // Runs on the background task queue. Take the store out of the
+        // cell so `inner_run` can consume it; if it's already gone, this
+        // task was dispatched twice, which we report as `AlreadyRan`.
+        *self.result.borrow_mut() = match self.store.borrow_mut().take() {
+            Some(store) => self.inner_run(store),
+            None => Err(Error::AlreadyRan(Self::name())),
+        };
+    }
+
+    fn done(&self) -> result::Result<(), nsresult> {
+        let callback = self.callback.get().unwrap();
+        // Swap the stored result out, leaving `AlreadyRan` behind, so that
+        // a second call to `done` surfaces an error instead of replaying a
+        // stale result.
+        match mem::replace(
+            &mut *self.result.borrow_mut(),
+            Err(Error::AlreadyRan(Self::name())),
+        ) {
+            Ok(()) => unsafe { callback.HandleSuccess(().into_variant().coerce()) },
+            Err(err) => {
+                // Format the error into a message string for the callback;
+                // writing into an `nsCString` is not expected to fail.
+                let mut message = nsCString::new();
+                write!(message, "{err}").unwrap();
+                unsafe { callback.HandleError(err.into(), &*message) }
+            }
+        }
+        .to_result()
+    }
+}
diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/src/store.rs b/toolkit/components/extensions/storage/webext_storage_bridge/src/store.rs
new file mode 100644
index 0000000000..cb1ce07784
--- /dev/null
+++ b/toolkit/components/extensions/storage/webext_storage_bridge/src/store.rs
@@ -0,0 +1,136 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use std::{fs::remove_file, path::PathBuf, sync::Arc};
+
+use interrupt_support::SqlInterruptHandle;
+use once_cell::sync::OnceCell;
+use webext_storage::store::WebExtStorageStore as Store;
+
+use crate::error::{self, Error};
+
+/// Options for an extension storage area.
+pub struct LazyStoreConfig {
+    /// The path to the database file for this storage area.
+    pub path: PathBuf,
+    /// The path to the old kinto database. If it exists, we should attempt to
+    /// migrate from this database as soon as we open our DB. It's not Option<>
+    /// because the caller will not have checked whether it exists or not, so
+    /// will assume it might.
+    pub kinto_path: PathBuf,
+}
+
+/// A lazy store is automatically initialized on a background thread with its
+/// configuration the first time it's used.
+#[derive(Default)]
+pub struct LazyStore {
+    // Each cell is set at most once: `config` from the main thread via
+    // `configure`, and `store` lazily via `get` (background thread only).
+    store: OnceCell<InterruptStore>,
+    config: OnceCell<LazyStoreConfig>,
+}
+
+/// An `InterruptStore` wraps an inner extension store, and its interrupt
+/// handle.
+struct InterruptStore {
+    inner: Store,
+    handle: Arc<SqlInterruptHandle>,
+}
+
+impl LazyStore {
+    /// Configures the lazy store. Returns an error if the store has already
+    /// been configured. This method should be called from the main thread.
+    pub fn configure(&self, config: LazyStoreConfig) -> error::Result<()> {
+        self.config
+            .set(config)
+            .map_err(|_| Error::AlreadyConfigured)
+    }
+
+    /// Interrupts all pending operations on the store. If a database statement
+    /// is currently running, this will interrupt that statement. If the
+    /// statement is a write inside an active transaction, the entire
+    /// transaction will be rolled back. This method should be called from the
+    /// main thread.
+    pub fn interrupt(&self) {
+        // If the store was never initialized, there's nothing to interrupt.
+        if let Some(outer) = self.store.get() {
+            outer.handle.interrupt();
+        }
+    }
+
+    /// Returns the underlying store, initializing it if needed. This method
+    /// should only be called from a background thread or task queue, since
+    /// opening the database does I/O.
+    pub fn get(&self) -> error::Result<&Store> {
+        // NOTE: if the closure fails, `OnceCell::get_or_try_init` leaves the
+        // cell empty, so a subsequent call will retry initialization.
+        Ok(&self
+            .store
+            .get_or_try_init(|| match self.config.get() {
+                Some(config) => {
+                    let store = init_store(config)?;
+                    let handle = store.interrupt_handle();
+                    Ok(InterruptStore {
+                        inner: store,
+                        handle,
+                    })
+                }
+                // `configure` was never called (or never succeeded).
+                None => Err(Error::NotConfigured),
+            })?
+            .inner)
+    }
+
+    /// Tears down the store. If the store wasn't initialized, this is a no-op.
+    /// This should only be called from a background thread or task queue,
+    /// because closing the database also does I/O.
+    pub fn teardown(self) -> error::Result<()> {
+        // `into_inner` consumes the cell; if the store was ever initialized,
+        // close its database connection, propagating any close error.
+        if let Some(store) = self.store.into_inner() {
+            store.inner.close()?;
+        }
+        Ok(())
+    }
+}
+
+// Initialize the store, performing a migration if necessary.
+// The requirements for migration are, roughly:
+// * If kinto_path doesn't exist, we don't try to migrate.
+// * If our DB path exists, we assume we've already migrated and don't try again
+// * If the migration fails, we close our store and delete the DB, then return
+//   a special error code which tells our caller about the failure. It's then
+//   expected to fallback to the "old" kinto store and we'll try next time.
+// Note that the migrate() method on the store is written such that it should
+// ignore all "read" errors from the source, but propagate "write" errors on our
+// DB - the intention is that things like corrupted source databases never fail,
+// but disk-space failures on our database do.
+fn init_store(config: &LazyStoreConfig) -> error::Result<Store> {
+    let should_migrate = config.kinto_path.exists() && !config.path.exists();
+    let store = Store::new(&config.path)?;
+    if should_migrate {
+        match store.migrate(&config.kinto_path) {
+            // It's likely to be too early for us to stick the MigrationInfo
+            // into the sync telemetry, a separate call to `take_migration_info`
+            // must be made to the store (this is done by telemetry after it's
+            // ready to submit the data).
+            Ok(()) => {
+                // need logging, but for now let's print to stdout.
+                println!("extension-storage: migration complete");
+                Ok(store)
+            }
+            Err(e) => {
+                println!("extension-storage: migration failure: {e}");
+                if let Err(e) = store.close() {
+                    // welp, this probably isn't going to end well...
+                    println!(
+                        "extension-storage: failed to close the store after migration failure: {e}"
+                    );
+                }
+                if let Err(e) = remove_file(&config.path) {
+                    // this is bad - if it happens regularly it will defeat
+                    // our entire migration strategy - we'll assume it
+                    // worked.
+                    // So it's desirable to make noise if this happens.
+                    println!("Failed to remove file after failed migration: {e}");
+                }
+                Err(Error::MigrationFailed(e))
+            }
+        }
+    } else {
+        Ok(store)
+    }
+}