author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 01:47:29 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 01:47:29 +0000
commit    0ebf5bdf043a27fd3dfb7f92e0cb63d88954c44d (patch)
tree      a31f07c9bcca9d56ce61e9a1ffd30ef350d513aa /toolkit/components/extensions/storage
parent    Initial commit. (diff)
Adding upstream version 115.8.0esr. (upstream/115.8.0esr)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'toolkit/components/extensions/storage')
-rw-r--r--   toolkit/components/extensions/storage/ExtensionStorageComponents.h          40
-rw-r--r--   toolkit/components/extensions/storage/ExtensionStorageComponents.sys.mjs   114
-rw-r--r--   toolkit/components/extensions/storage/components.conf                       22
-rw-r--r--   toolkit/components/extensions/storage/moz.build                             33
-rw-r--r--   toolkit/components/extensions/storage/mozIExtensionStorageArea.idl         127
-rw-r--r--   toolkit/components/extensions/storage/webext_storage_bridge/Cargo.toml      25
-rw-r--r--   toolkit/components/extensions/storage/webext_storage_bridge/src/area.rs    484
-rw-r--r--   toolkit/components/extensions/storage/webext_storage_bridge/src/error.rs   125
-rw-r--r--   toolkit/components/extensions/storage/webext_storage_bridge/src/lib.rs      65
-rw-r--r--   toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs    321
-rw-r--r--   toolkit/components/extensions/storage/webext_storage_bridge/src/store.rs   136
11 files changed, 1492 insertions, 0 deletions
diff --git a/toolkit/components/extensions/storage/ExtensionStorageComponents.h b/toolkit/components/extensions/storage/ExtensionStorageComponents.h
new file mode 100644
index 0000000000..53af177432
--- /dev/null
+++ b/toolkit/components/extensions/storage/ExtensionStorageComponents.h
@@ -0,0 +1,40 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_extensions_storage_ExtensionStorageComponents_h_
+#define mozilla_extensions_storage_ExtensionStorageComponents_h_
+
+#include "mozIExtensionStorageArea.h"
+#include "nsCOMPtr.h"
+
+extern "C" {
+
+// Implemented in Rust, in the `webext_storage_bridge` crate.
+nsresult NS_NewExtensionStorageSyncArea(mozIExtensionStorageArea** aResult);
+
+} // extern "C"
+
+namespace mozilla {
+namespace extensions {
+namespace storage {
+
+// The C++ constructor for a `storage.sync` area. This wrapper exists because
+// `components.conf` requires a component class constructor to return an
+// `already_AddRefed<T>`, but Rust doesn't have such a type. So we call the
+// Rust constructor using a `nsCOMPtr` (which is compatible with Rust's
+// `xpcom::RefPtr`) out param, and return that.
+already_AddRefed<mozIExtensionStorageArea> NewSyncArea() {
+ nsCOMPtr<mozIExtensionStorageArea> storage;
+ nsresult rv = NS_NewExtensionStorageSyncArea(getter_AddRefs(storage));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return nullptr;
+ }
+ return storage.forget();
+}
+
+} // namespace storage
+} // namespace extensions
+} // namespace mozilla
+
+#endif // mozilla_extensions_storage_ExtensionStorageComponents_h_
diff --git a/toolkit/components/extensions/storage/ExtensionStorageComponents.sys.mjs b/toolkit/components/extensions/storage/ExtensionStorageComponents.sys.mjs
new file mode 100644
index 0000000000..b02b0d53e8
--- /dev/null
+++ b/toolkit/components/extensions/storage/ExtensionStorageComponents.sys.mjs
@@ -0,0 +1,114 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const lazy = {};
+
+ChromeUtils.defineESModuleGetters(lazy, {
+ AsyncShutdown: "resource://gre/modules/AsyncShutdown.sys.mjs",
+ FileUtils: "resource://gre/modules/FileUtils.sys.mjs",
+});
+
+const StorageSyncArea = Components.Constructor(
+ "@mozilla.org/extensions/storage/internal/sync-area;1",
+ "mozIConfigurableExtensionStorageArea",
+ "configure"
+);
+
+/**
+ * An XPCOM service for the WebExtension `storage.sync` API. The service manages
+ * a storage area for storing and syncing extension data.
+ *
+ * The service configures its storage area with the database path, and hands
+ * out references to the configured area via `getInterface`. It also registers
+ * a shutdown blocker to automatically tear down the area.
+ *
+ * ## What's the difference between `storage/internal/sync-area;1` and
+ * `storage/sync;1`?
+ *
+ * `components.conf` has two classes:
+ * `@mozilla.org/extensions/storage/internal/sync-area;1` and
+ * `@mozilla.org/extensions/storage/sync;1`.
+ *
+ * The `storage/internal/sync-area;1` class is implemented in Rust, and can be
+ * instantiated using `createInstance` and `Components.Constructor`. It's not
+ * a singleton, so creating a new instance will create a new `storage.sync`
+ * area, with its own database connection. It's useful for testing, but not
+ * meant to be used outside of this module.
+ *
+ * The `storage/sync;1` class is implemented in this file. It's a singleton,
+ * ensuring there's only one `storage.sync` area, with one database connection.
+ * The service implements `nsIInterfaceRequestor`, so callers can access the
+ * storage interface like this:
+ *
+ * let storageSyncArea = Cc["@mozilla.org/extensions/storage/sync;1"]
+ * .getService(Ci.nsIInterfaceRequestor)
+ * .getInterface(Ci.mozIExtensionStorageArea);
+ *
+ * ...And the Sync interface like this:
+ *
+ * let extensionStorageEngine = Cc["@mozilla.org/extensions/storage/sync;1"]
+ * .getService(Ci.nsIInterfaceRequestor)
+ * .getInterface(Ci.mozIBridgedSyncEngine);
+ *
+ * @class
+ */
+export function StorageSyncService() {
+ if (StorageSyncService._singleton) {
+ return StorageSyncService._singleton;
+ }
+
+ let file = lazy.FileUtils.getFile("ProfD", ["storage-sync-v2.sqlite"]);
+ let kintoFile = lazy.FileUtils.getFile("ProfD", ["storage-sync.sqlite"]);
+ this._storageArea = new StorageSyncArea(file, kintoFile);
+
+ // Register a blocker to close the storage connection on shutdown.
+ this._shutdownBound = () => this._shutdown();
+ lazy.AsyncShutdown.profileChangeTeardown.addBlocker(
+ "StorageSyncService: shutdown",
+ this._shutdownBound
+ );
+
+ StorageSyncService._singleton = this;
+}
+
+StorageSyncService._singleton = null;
+
+StorageSyncService.prototype = {
+ QueryInterface: ChromeUtils.generateQI(["nsIInterfaceRequestor"]),
+
+ // Returns the storage and syncing interfaces. This just hands out a
+ // reference to the underlying storage area, with a quick check to make sure
+ // that callers are asking for the right interfaces.
+ getInterface(iid) {
+ if (
+ iid.equals(Ci.mozIExtensionStorageArea) ||
+ iid.equals(Ci.mozIBridgedSyncEngine)
+ ) {
+ return this._storageArea.QueryInterface(iid);
+ }
+ throw Components.Exception(
+ "This interface isn't implemented",
+ Cr.NS_ERROR_NO_INTERFACE
+ );
+ },
+
+ // Tears down the storage area and lifts the blocker so that shutdown can
+ // continue.
+ async _shutdown() {
+ try {
+ await new Promise((resolve, reject) => {
+ this._storageArea.teardown({
+ handleSuccess: resolve,
+ handleError(code, message) {
+ reject(Components.Exception(message, code));
+ },
+ });
+ });
+ } finally {
+ lazy.AsyncShutdown.profileChangeTeardown.removeBlocker(
+ this._shutdownBound
+ );
+ }
+ },
+};
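
Usage note (not part of the patch): chrome-privileged callers reach the singleton area through `nsIInterfaceRequestor`, exactly as the class comment above describes, and every storage call takes a callback object. A minimal sketch follows; the extension id and the Promise wrapper are hypothetical, while the contract ID, `getInterface`, and the `get(extensionId, key, callback)` signature are the ones this patch adds.

    // Hypothetical consumer code; "ext@example.com" is a made-up extension id.
    const area = Cc["@mozilla.org/extensions/storage/sync;1"]
      .getService(Ci.nsIInterfaceRequestor)
      .getInterface(Ci.mozIExtensionStorageArea);

    function getAll(extensionId) {
      return new Promise((resolve, reject) => {
        // A JSON `null` key asks for every key stored for the extension.
        area.get(extensionId, JSON.stringify(null), {
          handleSuccess: json => resolve(JSON.parse(json)),
          handleError: (code, message) =>
            reject(Components.Exception(message, code)),
        });
      });
    }

    getAll("ext@example.com").then(values => console.log(values));
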
diff --git a/toolkit/components/extensions/storage/components.conf b/toolkit/components/extensions/storage/components.conf
new file mode 100644
index 0000000000..a1d54fa542
--- /dev/null
+++ b/toolkit/components/extensions/storage/components.conf
@@ -0,0 +1,22 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+Classes = [
+ {
+ 'cid': '{f1e424f2-67fe-4f69-a8f8-3993a71f44fa}',
+ 'contract_ids': ['@mozilla.org/extensions/storage/internal/sync-area;1'],
+ 'type': 'mozIExtensionStorageArea',
+ 'headers': ['mozilla/extensions/storage/ExtensionStorageComponents.h'],
+ 'constructor': 'mozilla::extensions::storage::NewSyncArea',
+ },
+ {
+ 'cid': '{5b7047b4-fe17-4661-8e13-871402bc2023}',
+ 'contract_ids': ['@mozilla.org/extensions/storage/sync;1'],
+ 'esModule': 'resource://gre/modules/ExtensionStorageComponents.sys.mjs',
+ 'constructor': 'StorageSyncService',
+ 'singleton': True,
+ },
+]
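
Testing note (not part of the patch): because the internal class registered above is not a singleton, a test can create an isolated area with `createInstance` and configure it directly. A sketch under that assumption; the file names are hypothetical test paths, while `FileUtils`, `configure(databaseFile, kintoFile)`, and the interface names come from the files in this patch.

    const { FileUtils } = ChromeUtils.importESModule(
      "resource://gre/modules/FileUtils.sys.mjs"
    );

    // Hypothetical per-test database paths in the profile directory.
    const dbFile = FileUtils.getFile("ProfD", ["test-storage-sync-v2.sqlite"]);
    const kintoFile = FileUtils.getFile("ProfD", ["test-storage-sync.sqlite"]);

    // `configure` must be called before any storage method, and only once.
    const area = Cc[
      "@mozilla.org/extensions/storage/internal/sync-area;1"
    ].createInstance(Ci.mozIConfigurableExtensionStorageArea);
    area.configure(dbFile, kintoFile);

    // The same object also implements the storage interface.
    const storage = area.QueryInterface(Ci.mozIExtensionStorageArea);
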
diff --git a/toolkit/components/extensions/storage/moz.build b/toolkit/components/extensions/storage/moz.build
new file mode 100644
index 0000000000..85f52cdadb
--- /dev/null
+++ b/toolkit/components/extensions/storage/moz.build
@@ -0,0 +1,33 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+with Files("**"):
+ BUG_COMPONENT = ("WebExtensions", "Storage")
+
+XPIDL_MODULE = "webextensions-storage"
+
+XPIDL_SOURCES += [
+ "mozIExtensionStorageArea.idl",
+]
+
+# Don't build the Rust `storage.sync` bridge for GeckoView, as it will expose
+# a delegate for consumers to use instead. Android Components can then provide
+# an implementation of the delegate that's backed by the Rust component. For
+# details, please see bug 1626506, comment 4.
+if CONFIG["MOZ_WIDGET_TOOLKIT"] != "android":
+ EXPORTS.mozilla.extensions.storage += [
+ "ExtensionStorageComponents.h",
+ ]
+
+ EXTRA_JS_MODULES += [
+ "ExtensionStorageComponents.sys.mjs",
+ ]
+
+ XPCOM_MANIFESTS += [
+ "components.conf",
+ ]
+
+FINAL_LIBRARY = "xul"
diff --git a/toolkit/components/extensions/storage/mozIExtensionStorageArea.idl b/toolkit/components/extensions/storage/mozIExtensionStorageArea.idl
new file mode 100644
index 0000000000..b3dcaa2479
--- /dev/null
+++ b/toolkit/components/extensions/storage/mozIExtensionStorageArea.idl
@@ -0,0 +1,127 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+interface mozIExtensionStorageCallback;
+interface nsIFile;
+interface nsIVariant;
+
+// Implements the operations needed to support the `StorageArea` WebExtension
+// API.
+[scriptable, uuid(d8eb3ff1-9b4b-435a-99ca-5b8cbaba2420)]
+interface mozIExtensionStorageArea : nsISupports {
+  // These constants are exposed by the Rust crate, but it's not worth the
+  // effort of jumping through the hoops needed to expose them to the JS
+  // code in a sane way, so we just duplicate them here. We should consider a
+  // test that checks they match the Rust code.
+  // This interface is agnostic about which area it backs, so we prefix the
+  // constants with the area - the consumer of this interface knows which to use.
+ const unsigned long SYNC_QUOTA_BYTES = 102400;
+ const unsigned long SYNC_QUOTA_BYTES_PER_ITEM = 8192;
+ const unsigned long SYNC_MAX_ITEMS = 512;
+
+ // Sets one or more key-value pairs specified in `json` for the
+ // `extensionId`. If the `callback` implements
+  // `mozIExtensionStorageListener`, its `onChanged`
+ // method will be called with the new and old values.
+ void set(in AUTF8String extensionId,
+ in AUTF8String json,
+ in mozIExtensionStorageCallback callback);
+
+ // Returns the value for the `key` in the storage area for the
+ // `extensionId`. `key` must be a JSON string containing either `null`,
+ // an array of string key names, a single string key name, or an object
+ // where the properties are the key names, and the values are the defaults
+ // if the key name doesn't exist in the storage area.
+ //
+ // If `get()` fails due to the quota being exceeded, the exception will
+ // have a result code of NS_ERROR_DOM_QUOTA_EXCEEDED_ERR (==0x80530016)
+ void get(in AUTF8String extensionId,
+ in AUTF8String key,
+ in mozIExtensionStorageCallback callback);
+
+ // Removes the `key` from the storage area for the `extensionId`. If `key`
+ // exists and the `callback` implements `mozIExtensionStorageListener`, its
+ // `onChanged` method will be called with the removed key-value pair.
+ void remove(in AUTF8String extensionId,
+ in AUTF8String key,
+ in mozIExtensionStorageCallback callback);
+
+ // Removes all keys from the storage area for the `extensionId`. If
+  // `callback` implements `mozIExtensionStorageListener`, its `onChanged`
+ // method will be called with all removed key-value pairs.
+ void clear(in AUTF8String extensionId,
+ in mozIExtensionStorageCallback callback);
+
+ // Gets the number of bytes in use for the specified keys.
+ void getBytesInUse(in AUTF8String extensionId,
+ in AUTF8String keys,
+ in mozIExtensionStorageCallback callback);
+
+ // Gets and clears the information about the migration from the kinto
+ // database into the rust one. As "and clears" indicates, this will
+  // only produce a non-empty result the first time it's called after a
+ // migration (which, hopefully, should only happen once).
+ void takeMigrationInfo(in mozIExtensionStorageCallback callback);
+};
+
+// Implements additional methods for setting up and tearing down the underlying
+// database connection for a storage area. This is a separate interface because
+// these methods are not part of the `StorageArea` API, and have restrictions on
+// when they can be called.
+[scriptable, uuid(2b008295-1bcc-4610-84f1-ad4cab2fa9ee)]
+interface mozIConfigurableExtensionStorageArea : nsISupports {
+ // Sets up the storage area. An area can only be configured once; calling
+ // `configure` multiple times will throw. `configure` must also be called
+ // before any of the `mozIExtensionStorageArea` methods, or they'll fail
+ // with errors.
+ // The second param is the path to the kinto database file from which we
+ // should migrate. This should always be specified even when there's a
+ // chance the file doesn't exist.
+ void configure(in nsIFile databaseFile, in nsIFile kintoFile);
+
+ // Tears down the storage area, closing the backing database connection.
+ // This is called automatically when Firefox shuts down. Once a storage area
+ // has been shut down, all its methods will fail with errors. If `configure`
+ // hasn't been called for this area yet, `teardown` is a no-op.
+ void teardown(in mozIExtensionStorageCallback callback);
+};
+
+// Implements additional methods for syncing a storage area. This is a separate
+// interface because these methods are not part of the `StorageArea` API, and
+// have restrictions on when they can be called.
+[scriptable, uuid(6dac82c9-1d8a-4893-8c0f-6e626aef802c)]
+interface mozISyncedExtensionStorageArea : nsISupports {
+ // If a sync is in progress, this method fetches pending change
+ // notifications for all extensions whose storage areas were updated.
+ // `callback` should implement `mozIExtensionStorageListener` to forward
+ // the records to `storage.onChanged` listeners. This method should only
+ // be called by Sync, after `mozIBridgedSyncEngine.apply` and before
+ // `syncFinished`. It fetches nothing if called at any other time.
+ void fetchPendingSyncChanges(in mozIExtensionStorageCallback callback);
+};
+
+// A listener for storage area notifications.
+[scriptable, uuid(8cb3c7e4-d0ca-4353-bccd-2673b4e11510)]
+interface mozIExtensionStorageListener : nsISupports {
+ // Notifies that an operation has data to pass to `storage.onChanged`
+ // listeners for the given `extensionId`. `json` is a JSON array of listener
+ // infos. If an operation affects multiple extensions, this method will be
+ // called multiple times, once per extension.
+ void onChanged(in AUTF8String extensionId, in AUTF8String json);
+};
+
+// A generic callback for a storage operation. Either `handleSuccess` or
+// `handleError` is guaranteed to be called once.
+[scriptable, uuid(870dca40-6602-4748-8493-c4253eb7f322)]
+interface mozIExtensionStorageCallback : nsISupports {
+ // Called when the operation completes. Operations that return a result,
+ // like `get`, will pass a `UTF8String` variant. Those that don't return
+ // anything, like `set` or `remove`, will pass a `null` variant.
+ void handleSuccess(in nsIVariant result);
+
+ // Called when the operation fails.
+ void handleError(in nsresult code, in AUTF8String message);
+};
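
Listener note (not part of the patch): as the comments above describe, if the callback passed to `set` also QIs to `mozIExtensionStorageListener`, `onChanged` is invoked with the change JSON before `handleSuccess`. A minimal sketch, assuming chrome-privileged code; the extension id and stored value are made up.

    const area = Cc["@mozilla.org/extensions/storage/sync;1"]
      .getService(Ci.nsIInterfaceRequestor)
      .getInterface(Ci.mozIExtensionStorageArea);

    // The callback advertises both interfaces, so `set` can report changes via
    // `onChanged` and then signal completion via `handleSuccess`.
    const callback = {
      QueryInterface: ChromeUtils.generateQI([
        "mozIExtensionStorageCallback",
        "mozIExtensionStorageListener",
      ]),
      onChanged(extensionId, json) {
        // `json` is a JSON array of change infos for `storage.onChanged` listeners.
        console.log(`changes for ${extensionId}:`, JSON.parse(json));
      },
      handleSuccess(result) {
        // `set` passes a null variant; value-returning calls pass a JSON string.
        console.log("set finished");
      },
      handleError(code, message) {
        console.error(`set failed: ${message}`);
      },
    };

    area.set("ext@example.com", JSON.stringify({ theme: "dark" }), callback);
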
diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/Cargo.toml b/toolkit/components/extensions/storage/webext_storage_bridge/Cargo.toml
new file mode 100644
index 0000000000..39c5bf92c6
--- /dev/null
+++ b/toolkit/components/extensions/storage/webext_storage_bridge/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "webext_storage_bridge"
+description = "The WebExtension `storage.sync` bindings for Firefox"
+version = "0.1.0"
+authors = ["The Firefox Sync Developers <sync-team@mozilla.com>"]
+edition = "2018"
+license = "MPL-2.0"
+
+[dependencies]
+anyhow = "1.0"
+atomic_refcell = "0.1"
+cstr = "0.2"
+golden_gate = { path = "../../../../../services/sync/golden_gate" }
+interrupt-support = "0.1"
+moz_task = { path = "../../../../../xpcom/rust/moz_task" }
+nserror = { path = "../../../../../xpcom/rust/nserror" }
+nsstring = { path = "../../../../../xpcom/rust/nsstring" }
+once_cell = "1"
+thin-vec = { version = "0.2.1", features = ["gecko-ffi"] }
+xpcom = { path = "../../../../../xpcom/rust/xpcom" }
+serde = "1"
+serde_json = "1"
+storage_variant = { path = "../../../../../storage/variant" }
+sql-support = "0.1"
+webext-storage = "0.1"
diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/src/area.rs b/toolkit/components/extensions/storage/webext_storage_bridge/src/area.rs
new file mode 100644
index 0000000000..1418ccca29
--- /dev/null
+++ b/toolkit/components/extensions/storage/webext_storage_bridge/src/area.rs
@@ -0,0 +1,484 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use std::{
+ cell::{Ref, RefCell},
+ convert::TryInto,
+ ffi::OsString,
+ mem,
+ path::PathBuf,
+ str,
+ sync::Arc,
+};
+
+use golden_gate::{ApplyTask, BridgedEngine, FerryTask};
+use moz_task::{self, DispatchOptions, TaskRunnable};
+use nserror::{nsresult, NS_OK};
+use nsstring::{nsACString, nsCString, nsString};
+use thin_vec::ThinVec;
+use webext_storage::STORAGE_VERSION;
+use xpcom::{
+ interfaces::{
+ mozIBridgedSyncEngineApplyCallback, mozIBridgedSyncEngineCallback,
+ mozIExtensionStorageCallback, mozIServicesLogSink, nsIFile, nsISerialEventTarget,
+ },
+ RefPtr,
+};
+
+use crate::error::{Error, Result};
+use crate::punt::{Punt, PuntTask, TeardownTask};
+use crate::store::{LazyStore, LazyStoreConfig};
+
+fn path_from_nsifile(file: &nsIFile) -> Result<PathBuf> {
+ let mut raw_path = nsString::new();
+ // `nsIFile::GetPath` gives us a UTF-16-encoded version of its
+ // native path, which we must turn back into a platform-native
+ // string. We can't use `nsIFile::nativePath()` here because
+ // it's marked as `nostdcall`, which Rust doesn't support.
+ unsafe { file.GetPath(&mut *raw_path) }.to_result()?;
+ let native_path = {
+ // On Windows, we can create a native string directly from the
+ // encoded path.
+ #[cfg(windows)]
+ {
+ use std::os::windows::prelude::*;
+ OsString::from_wide(&raw_path)
+ }
+ // On other platforms, we must first decode the raw path from
+ // UTF-16, and then create our native string.
+ #[cfg(not(windows))]
+ OsString::from(String::from_utf16(&raw_path)?)
+ };
+ Ok(native_path.into())
+}
+
+/// An XPCOM component class for the Rust extension storage API. This class
+/// implements the interfaces needed for syncing and storage.
+///
+/// This class can be created on any thread, but must not be shared between
+/// threads. In Rust terms, it's `Send`, but not `Sync`.
+#[xpcom(
+ implement(
+ mozIExtensionStorageArea,
+ mozIConfigurableExtensionStorageArea,
+ mozISyncedExtensionStorageArea,
+ mozIInterruptible,
+ mozIBridgedSyncEngine
+ ),
+ nonatomic
+)]
+pub struct StorageSyncArea {
+ /// A background task queue, used to run all our storage operations on a
+ /// thread pool. Using a serial event target here means that all operations
+ /// will execute sequentially.
+ queue: RefPtr<nsISerialEventTarget>,
+ /// The store is lazily initialized on the task queue the first time it's
+ /// used.
+ store: RefCell<Option<Arc<LazyStore>>>,
+}
+
+/// `mozIExtensionStorageArea` implementation.
+impl StorageSyncArea {
+ /// Creates a storage area and its task queue.
+ pub fn new() -> Result<RefPtr<StorageSyncArea>> {
+ let queue = moz_task::create_background_task_queue(cstr!("StorageSyncArea"))?;
+ Ok(StorageSyncArea::allocate(InitStorageSyncArea {
+ queue,
+ store: RefCell::new(Some(Arc::default())),
+ }))
+ }
+
+ /// Returns the store for this area, or an error if it's been torn down.
+ fn store(&self) -> Result<Ref<'_, Arc<LazyStore>>> {
+ let maybe_store = self.store.borrow();
+ if maybe_store.is_some() {
+ Ok(Ref::map(maybe_store, |s| s.as_ref().unwrap()))
+ } else {
+ Err(Error::AlreadyTornDown)
+ }
+ }
+
+ /// Dispatches a task for a storage operation to the task queue.
+ fn dispatch(&self, punt: Punt, callback: &mozIExtensionStorageCallback) -> Result<()> {
+ let name = punt.name();
+ let task = PuntTask::new(Arc::downgrade(&*self.store()?), punt, callback)?;
+ let runnable = TaskRunnable::new(name, Box::new(task))?;
+ // `may_block` schedules the runnable on a dedicated I/O pool.
+ TaskRunnable::dispatch_with_options(
+ runnable,
+ self.queue.coerce(),
+ DispatchOptions::new().may_block(true),
+ )?;
+ Ok(())
+ }
+
+ xpcom_method!(
+ configure => Configure(
+ database_file: *const nsIFile,
+ kinto_file: *const nsIFile
+ )
+ );
+ /// Sets up the storage area.
+ fn configure(&self, database_file: &nsIFile, kinto_file: &nsIFile) -> Result<()> {
+ self.store()?.configure(LazyStoreConfig {
+ path: path_from_nsifile(database_file)?,
+ kinto_path: path_from_nsifile(kinto_file)?,
+ })?;
+ Ok(())
+ }
+
+ xpcom_method!(
+ set => Set(
+ ext_id: *const ::nsstring::nsACString,
+ json: *const ::nsstring::nsACString,
+ callback: *const mozIExtensionStorageCallback
+ )
+ );
+ /// Sets one or more key-value pairs.
+ fn set(
+ &self,
+ ext_id: &nsACString,
+ json: &nsACString,
+ callback: &mozIExtensionStorageCallback,
+ ) -> Result<()> {
+ self.dispatch(
+ Punt::Set {
+ ext_id: str::from_utf8(ext_id)?.into(),
+ value: serde_json::from_str(str::from_utf8(json)?)?,
+ },
+ callback,
+ )?;
+ Ok(())
+ }
+
+ xpcom_method!(
+ get => Get(
+ ext_id: *const ::nsstring::nsACString,
+ json: *const ::nsstring::nsACString,
+ callback: *const mozIExtensionStorageCallback
+ )
+ );
+ /// Gets values for one or more keys.
+ fn get(
+ &self,
+ ext_id: &nsACString,
+ json: &nsACString,
+ callback: &mozIExtensionStorageCallback,
+ ) -> Result<()> {
+ self.dispatch(
+ Punt::Get {
+ ext_id: str::from_utf8(ext_id)?.into(),
+ keys: serde_json::from_str(str::from_utf8(json)?)?,
+ },
+ callback,
+ )
+ }
+
+ xpcom_method!(
+ remove => Remove(
+ ext_id: *const ::nsstring::nsACString,
+ json: *const ::nsstring::nsACString,
+ callback: *const mozIExtensionStorageCallback
+ )
+ );
+ /// Removes one or more keys and their values.
+ fn remove(
+ &self,
+ ext_id: &nsACString,
+ json: &nsACString,
+ callback: &mozIExtensionStorageCallback,
+ ) -> Result<()> {
+ self.dispatch(
+ Punt::Remove {
+ ext_id: str::from_utf8(ext_id)?.into(),
+ keys: serde_json::from_str(str::from_utf8(json)?)?,
+ },
+ callback,
+ )
+ }
+
+ xpcom_method!(
+ clear => Clear(
+ ext_id: *const ::nsstring::nsACString,
+ callback: *const mozIExtensionStorageCallback
+ )
+ );
+ /// Removes all keys and values for the specified extension.
+ fn clear(&self, ext_id: &nsACString, callback: &mozIExtensionStorageCallback) -> Result<()> {
+ self.dispatch(
+ Punt::Clear {
+ ext_id: str::from_utf8(ext_id)?.into(),
+ },
+ callback,
+ )
+ }
+
+ xpcom_method!(
+ getBytesInUse => GetBytesInUse(
+ ext_id: *const ::nsstring::nsACString,
+ keys: *const ::nsstring::nsACString,
+ callback: *const mozIExtensionStorageCallback
+ )
+ );
+ /// Obtains the count of bytes in use for the specified key or for all keys.
+ fn getBytesInUse(
+ &self,
+ ext_id: &nsACString,
+ keys: &nsACString,
+ callback: &mozIExtensionStorageCallback,
+ ) -> Result<()> {
+ self.dispatch(
+ Punt::GetBytesInUse {
+ ext_id: str::from_utf8(ext_id)?.into(),
+ keys: serde_json::from_str(str::from_utf8(keys)?)?,
+ },
+ callback,
+ )
+ }
+
+ xpcom_method!(teardown => Teardown(callback: *const mozIExtensionStorageCallback));
+ /// Tears down the storage area, closing the backing database connection.
+ fn teardown(&self, callback: &mozIExtensionStorageCallback) -> Result<()> {
+ // Each storage task holds a `Weak` reference to the store, which it
+ // upgrades to an `Arc` (strong reference) when the task runs on the
+ // background queue. The strong reference is dropped when the task
+ // finishes. When we tear down the storage area, we relinquish our one
+ // owned strong reference to the `TeardownTask`. Because we're using a
+ // task queue, when the `TeardownTask` runs, it should have the only
+ // strong reference to the store, since all other tasks that called
+ // `Weak::upgrade` will have already finished. The `TeardownTask` can
+ // then consume the `Arc` and destroy the store.
+ let mut maybe_store = self.store.borrow_mut();
+ match mem::take(&mut *maybe_store) {
+ Some(store) => {
+ // Interrupt any currently-running statements.
+ store.interrupt();
+ // If dispatching the runnable fails, we'll leak the store
+ // without closing its database connection.
+ teardown(&self.queue, store, callback)?;
+ }
+ None => return Err(Error::AlreadyTornDown),
+ }
+ Ok(())
+ }
+
+ xpcom_method!(takeMigrationInfo => TakeMigrationInfo(callback: *const mozIExtensionStorageCallback));
+
+    /// Fetch-and-delete (i.e. `take`) information about the migration from the
+ /// kinto-based extension-storage to the rust-based storage.
+ fn takeMigrationInfo(&self, callback: &mozIExtensionStorageCallback) -> Result<()> {
+ self.dispatch(Punt::TakeMigrationInfo, callback)
+ }
+}
+
+fn teardown(
+ queue: &nsISerialEventTarget,
+ store: Arc<LazyStore>,
+ callback: &mozIExtensionStorageCallback,
+) -> Result<()> {
+ let task = TeardownTask::new(store, callback)?;
+ let runnable = TaskRunnable::new(TeardownTask::name(), Box::new(task))?;
+ TaskRunnable::dispatch_with_options(
+ runnable,
+ queue.coerce(),
+ DispatchOptions::new().may_block(true),
+ )?;
+ Ok(())
+}
+
+/// `mozISyncedExtensionStorageArea` implementation.
+impl StorageSyncArea {
+ xpcom_method!(
+ fetch_pending_sync_changes => FetchPendingSyncChanges(callback: *const mozIExtensionStorageCallback)
+ );
+ fn fetch_pending_sync_changes(&self, callback: &mozIExtensionStorageCallback) -> Result<()> {
+ self.dispatch(Punt::FetchPendingSyncChanges, callback)
+ }
+}
+
+/// `mozIInterruptible` implementation.
+impl StorageSyncArea {
+ xpcom_method!(
+ interrupt => Interrupt()
+ );
+ /// Interrupts any operations currently running on the background task
+ /// queue.
+ fn interrupt(&self) -> Result<()> {
+ self.store()?.interrupt();
+ Ok(())
+ }
+}
+
+/// `mozIBridgedSyncEngine` implementation.
+impl StorageSyncArea {
+ xpcom_method!(get_logger => GetLogger() -> *const mozIServicesLogSink);
+ fn get_logger(&self) -> Result<RefPtr<mozIServicesLogSink>> {
+ Err(NS_OK)?
+ }
+
+ xpcom_method!(set_logger => SetLogger(logger: *const mozIServicesLogSink));
+ fn set_logger(&self, _logger: Option<&mozIServicesLogSink>) -> Result<()> {
+ Ok(())
+ }
+
+ xpcom_method!(get_storage_version => GetStorageVersion() -> i32);
+ fn get_storage_version(&self) -> Result<i32> {
+ Ok(STORAGE_VERSION.try_into().unwrap())
+ }
+
+ // It's possible that migration, or even merging, will result in records
+ // too large for the server. We tolerate that (and hope that the addons do
+ // too :)
+ xpcom_method!(get_allow_skipped_record => GetAllowSkippedRecord() -> bool);
+ fn get_allow_skipped_record(&self) -> Result<bool> {
+ Ok(true)
+ }
+
+ xpcom_method!(
+ get_last_sync => GetLastSync(
+ callback: *const mozIBridgedSyncEngineCallback
+ )
+ );
+ fn get_last_sync(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
+ Ok(FerryTask::for_last_sync(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
+ }
+
+ xpcom_method!(
+ set_last_sync => SetLastSync(
+ last_sync_millis: i64,
+ callback: *const mozIBridgedSyncEngineCallback
+ )
+ );
+ fn set_last_sync(
+ &self,
+ last_sync_millis: i64,
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<()> {
+ Ok(
+ FerryTask::for_set_last_sync(self.new_bridge()?, last_sync_millis, callback)?
+ .dispatch(&self.queue)?,
+ )
+ }
+
+ xpcom_method!(
+ get_sync_id => GetSyncId(
+ callback: *const mozIBridgedSyncEngineCallback
+ )
+ );
+ fn get_sync_id(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
+ Ok(FerryTask::for_sync_id(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
+ }
+
+ xpcom_method!(
+ reset_sync_id => ResetSyncId(
+ callback: *const mozIBridgedSyncEngineCallback
+ )
+ );
+ fn reset_sync_id(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
+ Ok(FerryTask::for_reset_sync_id(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
+ }
+
+ xpcom_method!(
+ ensure_current_sync_id => EnsureCurrentSyncId(
+ new_sync_id: *const nsACString,
+ callback: *const mozIBridgedSyncEngineCallback
+ )
+ );
+ fn ensure_current_sync_id(
+ &self,
+ new_sync_id: &nsACString,
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<()> {
+ Ok(
+ FerryTask::for_ensure_current_sync_id(self.new_bridge()?, new_sync_id, callback)?
+ .dispatch(&self.queue)?,
+ )
+ }
+
+ xpcom_method!(
+ sync_started => SyncStarted(
+ callback: *const mozIBridgedSyncEngineCallback
+ )
+ );
+ fn sync_started(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
+ Ok(FerryTask::for_sync_started(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
+ }
+
+ xpcom_method!(
+ store_incoming => StoreIncoming(
+ incoming_envelopes_json: *const ThinVec<::nsstring::nsCString>,
+ callback: *const mozIBridgedSyncEngineCallback
+ )
+ );
+ fn store_incoming(
+ &self,
+ incoming_envelopes_json: Option<&ThinVec<nsCString>>,
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<()> {
+ Ok(FerryTask::for_store_incoming(
+ self.new_bridge()?,
+ incoming_envelopes_json.map(|v| v.as_slice()).unwrap_or(&[]),
+ callback,
+ )?
+ .dispatch(&self.queue)?)
+ }
+
+ xpcom_method!(apply => Apply(callback: *const mozIBridgedSyncEngineApplyCallback));
+ fn apply(&self, callback: &mozIBridgedSyncEngineApplyCallback) -> Result<()> {
+ Ok(ApplyTask::new(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
+ }
+
+ xpcom_method!(
+ set_uploaded => SetUploaded(
+ server_modified_millis: i64,
+ uploaded_ids: *const ThinVec<::nsstring::nsCString>,
+ callback: *const mozIBridgedSyncEngineCallback
+ )
+ );
+ fn set_uploaded(
+ &self,
+ server_modified_millis: i64,
+ uploaded_ids: Option<&ThinVec<nsCString>>,
+ callback: &mozIBridgedSyncEngineCallback,
+ ) -> Result<()> {
+ Ok(FerryTask::for_set_uploaded(
+ self.new_bridge()?,
+ server_modified_millis,
+ uploaded_ids.map(|v| v.as_slice()).unwrap_or(&[]),
+ callback,
+ )?
+ .dispatch(&self.queue)?)
+ }
+
+ xpcom_method!(
+ sync_finished => SyncFinished(
+ callback: *const mozIBridgedSyncEngineCallback
+ )
+ );
+ fn sync_finished(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
+ Ok(FerryTask::for_sync_finished(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
+ }
+
+ xpcom_method!(
+ reset => Reset(
+ callback: *const mozIBridgedSyncEngineCallback
+ )
+ );
+ fn reset(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
+ Ok(FerryTask::for_reset(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
+ }
+
+ xpcom_method!(
+ wipe => Wipe(
+ callback: *const mozIBridgedSyncEngineCallback
+ )
+ );
+ fn wipe(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
+ Ok(FerryTask::for_wipe(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
+ }
+
+ fn new_bridge(&self) -> Result<Box<dyn BridgedEngine>> {
+ Ok(Box::new(self.store()?.get()?.bridged_engine()))
+ }
+}
diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/src/error.rs b/toolkit/components/extensions/storage/webext_storage_bridge/src/error.rs
new file mode 100644
index 0000000000..07b14b4e14
--- /dev/null
+++ b/toolkit/components/extensions/storage/webext_storage_bridge/src/error.rs
@@ -0,0 +1,125 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use std::{error, fmt, result, str::Utf8Error, string::FromUtf16Error};
+
+use golden_gate::Error as GoldenGateError;
+use nserror::{
+ nsresult, NS_ERROR_ALREADY_INITIALIZED, NS_ERROR_CANNOT_CONVERT_DATA,
+ NS_ERROR_DOM_QUOTA_EXCEEDED_ERR, NS_ERROR_FAILURE, NS_ERROR_INVALID_ARG,
+ NS_ERROR_NOT_IMPLEMENTED, NS_ERROR_NOT_INITIALIZED, NS_ERROR_UNEXPECTED,
+};
+use serde_json::error::Error as JsonError;
+use webext_storage::error::Error as WebextStorageError;
+use webext_storage::error::ErrorKind as WebextStorageErrorKind;
+
+/// A specialized `Result` type for extension storage operations.
+pub type Result<T> = result::Result<T, Error>;
+
+/// The error type for extension storage operations. Errors can be converted
+/// into `nsresult` codes, and include more detailed messages that can be passed
+/// to callbacks.
+#[derive(Debug)]
+pub enum Error {
+ Nsresult(nsresult),
+ WebextStorage(WebextStorageError),
+ MigrationFailed(WebextStorageError),
+ GoldenGate(GoldenGateError),
+ MalformedString(Box<dyn error::Error + Send + Sync + 'static>),
+ AlreadyConfigured,
+ NotConfigured,
+ AlreadyRan(&'static str),
+ DidNotRun(&'static str),
+ AlreadyTornDown,
+ NotImplemented,
+}
+
+impl error::Error for Error {
+ fn source(&self) -> Option<&(dyn error::Error + 'static)> {
+ match self {
+ Error::MalformedString(error) => Some(error.as_ref()),
+ _ => None,
+ }
+ }
+}
+
+impl From<nsresult> for Error {
+ fn from(result: nsresult) -> Error {
+ Error::Nsresult(result)
+ }
+}
+
+impl From<WebextStorageError> for Error {
+ fn from(error: WebextStorageError) -> Error {
+ Error::WebextStorage(error)
+ }
+}
+
+impl From<GoldenGateError> for Error {
+ fn from(error: GoldenGateError) -> Error {
+ Error::GoldenGate(error)
+ }
+}
+
+impl From<Utf8Error> for Error {
+ fn from(error: Utf8Error) -> Error {
+ Error::MalformedString(error.into())
+ }
+}
+
+impl From<FromUtf16Error> for Error {
+ fn from(error: FromUtf16Error) -> Error {
+ Error::MalformedString(error.into())
+ }
+}
+
+impl From<JsonError> for Error {
+ fn from(error: JsonError) -> Error {
+ Error::MalformedString(error.into())
+ }
+}
+
+impl From<Error> for nsresult {
+ fn from(error: Error) -> nsresult {
+ match error {
+ Error::Nsresult(result) => result,
+ Error::WebextStorage(e) => match e.kind() {
+ WebextStorageErrorKind::QuotaError(_) => NS_ERROR_DOM_QUOTA_EXCEEDED_ERR,
+ _ => NS_ERROR_FAILURE,
+ },
+ Error::MigrationFailed(_) => NS_ERROR_CANNOT_CONVERT_DATA,
+ Error::GoldenGate(error) => error.into(),
+ Error::MalformedString(_) => NS_ERROR_INVALID_ARG,
+ Error::AlreadyConfigured => NS_ERROR_ALREADY_INITIALIZED,
+ Error::NotConfigured => NS_ERROR_NOT_INITIALIZED,
+ Error::AlreadyRan(_) => NS_ERROR_UNEXPECTED,
+ Error::DidNotRun(_) => NS_ERROR_UNEXPECTED,
+ Error::AlreadyTornDown => NS_ERROR_UNEXPECTED,
+ Error::NotImplemented => NS_ERROR_NOT_IMPLEMENTED,
+ }
+ }
+}
+
+impl fmt::Display for Error {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ Error::Nsresult(result) => write!(f, "Operation failed with {result}"),
+ Error::WebextStorage(error) => error.fmt(f),
+ Error::MigrationFailed(error) => write!(f, "Migration failed with {error}"),
+ Error::GoldenGate(error) => error.fmt(f),
+ Error::MalformedString(error) => error.fmt(f),
+ Error::AlreadyConfigured => write!(f, "The storage area is already configured"),
+ Error::NotConfigured => write!(
+ f,
+ "The storage area must be configured by calling `configure` first"
+ ),
+ Error::AlreadyRan(what) => write!(f, "`{what}` already ran on the background thread"),
+ Error::DidNotRun(what) => write!(f, "`{what}` didn't run on the background thread"),
+ Error::AlreadyTornDown => {
+ write!(f, "Can't use a storage area that's already torn down")
+ }
+ Error::NotImplemented => write!(f, "Operation not implemented"),
+ }
+ }
+}
diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/src/lib.rs b/toolkit/components/extensions/storage/webext_storage_bridge/src/lib.rs
new file mode 100644
index 0000000000..94133ef1e9
--- /dev/null
+++ b/toolkit/components/extensions/storage/webext_storage_bridge/src/lib.rs
@@ -0,0 +1,65 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#![allow(non_snake_case)]
+
+//! This crate bridges the WebExtension storage area interfaces in Firefox
+//! Desktop to the extension storage Rust component in Application Services.
+//!
+//! ## How are the WebExtension storage APIs implemented in Firefox?
+//!
+//! There are three storage APIs available for WebExtensions:
+//! `storage.local`, which is stored locally in an IndexedDB database and never
+//! synced to other devices, `storage.sync`, which is stored in a local SQLite
+//! database and synced to all devices signed in to the same Firefox Account,
+//! and `storage.managed`, which is provisioned in a native manifest and
+//! read-only.
+//!
+//! * `storage.local` is implemented in `ExtensionStorageIDB.jsm`.
+//! * `storage.sync` is implemented in a Rust component, `webext_storage`. This
+//! Rust component is vendored in m-c, and exposed to JavaScript via an XPCOM
+//! API in `webext_storage_bridge` (this crate). Eventually, we'll change
+//! `ExtensionStorageSync.jsm` to call the XPCOM API instead of using the
+//! old Kinto storage adapter.
+//! * `storage.managed` is implemented directly in `parent/ext-storage.js`.
+//!
+//! `webext_storage_bridge` implements the `mozIExtensionStorageArea`
+//! (and, eventually, `mozIBridgedSyncEngine`) interface for `storage.sync`. The
+//! implementation is in `area::StorageSyncArea`, and is backed by the
+//! `webext_storage` component.
+
+#[macro_use]
+extern crate cstr;
+#[macro_use]
+extern crate xpcom;
+
+mod area;
+mod error;
+mod punt;
+mod store;
+
+use nserror::{nsresult, NS_OK};
+use xpcom::{interfaces::mozIExtensionStorageArea, RefPtr};
+
+use crate::area::StorageSyncArea;
+
+/// The constructor for a `storage.sync` area. This uses C linkage so that it
+/// can be called from C++. See `ExtensionStorageComponents.h` for the C++
+/// constructor that's passed to the component manager.
+///
+/// # Safety
+///
+/// This function is unsafe because it dereferences `result`.
+#[no_mangle]
+pub unsafe extern "C" fn NS_NewExtensionStorageSyncArea(
+ result: *mut *const mozIExtensionStorageArea,
+) -> nsresult {
+ match StorageSyncArea::new() {
+ Ok(bridge) => {
+ RefPtr::new(bridge.coerce::<mozIExtensionStorageArea>()).forget(&mut *result);
+ NS_OK
+ }
+ Err(err) => err.into(),
+ }
+}
diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs b/toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs
new file mode 100644
index 0000000000..4740237942
--- /dev/null
+++ b/toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs
@@ -0,0 +1,321 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use std::{
+ borrow::Borrow,
+ fmt::Write,
+ mem, result, str,
+ sync::{Arc, Weak},
+};
+
+use atomic_refcell::AtomicRefCell;
+use moz_task::{Task, ThreadPtrHandle, ThreadPtrHolder};
+use nserror::nsresult;
+use nsstring::nsCString;
+use serde::Serialize;
+use serde_json::Value as JsonValue;
+use storage_variant::VariantType;
+use xpcom::{
+ interfaces::{mozIExtensionStorageCallback, mozIExtensionStorageListener},
+ RefPtr, XpCom,
+};
+
+use crate::error::{Error, Result};
+use crate::store::LazyStore;
+
+/// A storage operation that's punted from the main thread to the background
+/// task queue.
+pub enum Punt {
+ /// Get the values of the keys for an extension.
+ Get { ext_id: String, keys: JsonValue },
+ /// Set a key-value pair for an extension.
+ Set { ext_id: String, value: JsonValue },
+ /// Remove one or more keys for an extension.
+ Remove { ext_id: String, keys: JsonValue },
+ /// Clear all keys and values for an extension.
+ Clear { ext_id: String },
+ /// Returns the bytes in use for the specified, or all, keys.
+ GetBytesInUse { ext_id: String, keys: JsonValue },
+ /// Fetches all pending Sync change notifications to pass to
+ /// `storage.onChanged` listeners.
+ FetchPendingSyncChanges,
+    /// Fetch-and-delete (i.e. `take`) information about the migration from the
+ /// kinto-based extension-storage to the rust-based storage.
+ ///
+ /// This data is stored in the database instead of just being returned by
+ /// the call to `migrate`, as we may migrate prior to telemetry being ready.
+ TakeMigrationInfo,
+}
+
+impl Punt {
+ /// Returns the operation name, used to label the task runnable and report
+ /// errors.
+ pub fn name(&self) -> &'static str {
+ match self {
+ Punt::Get { .. } => "webext_storage::get",
+ Punt::Set { .. } => "webext_storage::set",
+ Punt::Remove { .. } => "webext_storage::remove",
+ Punt::Clear { .. } => "webext_storage::clear",
+ Punt::GetBytesInUse { .. } => "webext_storage::get_bytes_in_use",
+ Punt::FetchPendingSyncChanges => "webext_storage::fetch_pending_sync_changes",
+ Punt::TakeMigrationInfo => "webext_storage::take_migration_info",
+ }
+ }
+}
+
+/// A storage operation result, punted from the background queue back to the
+/// main thread.
+#[derive(Default)]
+struct PuntResult {
+ changes: Vec<Change>,
+ value: Option<String>,
+}
+
+/// A change record for an extension.
+struct Change {
+ ext_id: String,
+ json: String,
+}
+
+impl PuntResult {
+ /// Creates a result with a single change to pass to `onChanged`, and no
+ /// return value for `handleSuccess`. The `Borrow` bound lets this method
+ /// take either a borrowed reference or an owned value.
+ fn with_change<T: Borrow<S>, S: Serialize>(ext_id: &str, changes: T) -> Result<Self> {
+ Ok(PuntResult {
+ changes: vec![Change {
+ ext_id: ext_id.into(),
+ json: serde_json::to_string(changes.borrow())?,
+ }],
+ value: None,
+ })
+ }
+
+ /// Creates a result with changes for multiple extensions to pass to
+ /// `onChanged`, and no return value for `handleSuccess`.
+ fn with_changes(changes: Vec<Change>) -> Self {
+ PuntResult {
+ changes,
+ value: None,
+ }
+ }
+
+ /// Creates a result with no changes to pass to `onChanged`, and a return
+ /// value for `handleSuccess`.
+ fn with_value<T: Borrow<S>, S: Serialize>(value: T) -> Result<Self> {
+ Ok(PuntResult {
+ changes: Vec::new(),
+ value: Some(serde_json::to_string(value.borrow())?),
+ })
+ }
+}
+
+/// A generic task used for all storage operations. Punts the operation to the
+/// background task queue, receives a result back on the main thread, and calls
+/// the callback with it.
+pub struct PuntTask {
+ name: &'static str,
+ /// Storage tasks hold weak references to the store, which they upgrade
+ /// to strong references when running on the background queue. This
+ /// ensures that pending storage tasks don't block teardown (for example,
+ /// if a consumer calls `get` and then `teardown`, without waiting for
+ /// `get` to finish).
+ store: Weak<LazyStore>,
+ punt: AtomicRefCell<Option<Punt>>,
+ callback: ThreadPtrHandle<mozIExtensionStorageCallback>,
+ result: AtomicRefCell<Result<PuntResult>>,
+}
+
+impl PuntTask {
+ /// Creates a storage task that punts an operation to the background queue.
+ /// Returns an error if the task couldn't be created because the thread
+ /// manager is shutting down.
+ pub fn new(
+ store: Weak<LazyStore>,
+ punt: Punt,
+ callback: &mozIExtensionStorageCallback,
+ ) -> Result<Self> {
+ let name = punt.name();
+ Ok(Self {
+ name,
+ store,
+ punt: AtomicRefCell::new(Some(punt)),
+ callback: ThreadPtrHolder::new(
+ cstr!("mozIExtensionStorageCallback"),
+ RefPtr::new(callback),
+ )?,
+ result: AtomicRefCell::new(Err(Error::DidNotRun(name))),
+ })
+ }
+
+ /// Upgrades the task's weak `LazyStore` reference to a strong one. Returns
+ /// an error if the store has been torn down.
+ ///
+ /// It's important that this is called on the background queue, after the
+ /// task has been dispatched. Storage tasks shouldn't hold strong references
+ /// to the store on the main thread, because then they might block teardown.
+ fn store(&self) -> Result<Arc<LazyStore>> {
+ match self.store.upgrade() {
+ Some(store) => Ok(store),
+ None => Err(Error::AlreadyTornDown),
+ }
+ }
+
+ /// Runs this task's storage operation on the background queue.
+ fn inner_run(&self, punt: Punt) -> Result<PuntResult> {
+ Ok(match punt {
+ Punt::Set { ext_id, value } => {
+ PuntResult::with_change(&ext_id, self.store()?.get()?.set(&ext_id, value)?)?
+ }
+ Punt::Get { ext_id, keys } => {
+ PuntResult::with_value(self.store()?.get()?.get(&ext_id, keys)?)?
+ }
+ Punt::Remove { ext_id, keys } => {
+ PuntResult::with_change(&ext_id, self.store()?.get()?.remove(&ext_id, keys)?)?
+ }
+ Punt::Clear { ext_id } => {
+ PuntResult::with_change(&ext_id, self.store()?.get()?.clear(&ext_id)?)?
+ }
+ Punt::GetBytesInUse { ext_id, keys } => {
+ PuntResult::with_value(self.store()?.get()?.get_bytes_in_use(&ext_id, keys)?)?
+ }
+ Punt::FetchPendingSyncChanges => PuntResult::with_changes(
+ self.store()?
+ .get()?
+ .get_synced_changes()?
+ .into_iter()
+ .map(|info| Change {
+ ext_id: info.ext_id,
+ json: info.changes,
+ })
+ .collect(),
+ ),
+ Punt::TakeMigrationInfo => {
+ PuntResult::with_value(self.store()?.get()?.take_migration_info()?)?
+ }
+ })
+ }
+}
+
+impl Task for PuntTask {
+ fn run(&self) {
+ *self.result.borrow_mut() = match self.punt.borrow_mut().take() {
+ Some(punt) => self.inner_run(punt),
+ // A task should never run on the background queue twice, but we
+ // return an error just in case.
+ None => Err(Error::AlreadyRan(self.name)),
+ };
+ }
+
+ fn done(&self) -> result::Result<(), nsresult> {
+ let callback = self.callback.get().unwrap();
+ // As above, `done` should never be called multiple times, but we handle
+ // that by returning an error.
+ match mem::replace(
+ &mut *self.result.borrow_mut(),
+ Err(Error::AlreadyRan(self.name)),
+ ) {
+ Ok(PuntResult { changes, value }) => {
+ // If we have change data, and the callback implements the
+ // listener interface, notify about it first.
+ if let Some(listener) = callback.query_interface::<mozIExtensionStorageListener>() {
+ for Change { ext_id, json } in changes {
+ // Ignore errors.
+ let _ = unsafe {
+ listener.OnChanged(&*nsCString::from(ext_id), &*nsCString::from(json))
+ };
+ }
+ }
+ let result = value.map(nsCString::from).into_variant();
+ unsafe { callback.HandleSuccess(result.coerce()) }
+ }
+ Err(err) => {
+ let mut message = nsCString::new();
+ write!(message, "{err}").unwrap();
+ unsafe { callback.HandleError(err.into(), &*message) }
+ }
+ }
+ .to_result()
+ }
+}
+
+/// A task to tear down the store on the background task queue.
+pub struct TeardownTask {
+ /// Unlike storage tasks, the teardown task holds a strong reference to
+ /// the store, which it drops on the background queue. This is the only
+ /// task that should do that.
+ store: AtomicRefCell<Option<Arc<LazyStore>>>,
+ callback: ThreadPtrHandle<mozIExtensionStorageCallback>,
+ result: AtomicRefCell<Result<()>>,
+}
+
+impl TeardownTask {
+ /// Creates a teardown task. This should only be created and dispatched
+ /// once, to clean up the store at shutdown. Returns an error if the task
+ /// couldn't be created because the thread manager is shutting down.
+ pub fn new(store: Arc<LazyStore>, callback: &mozIExtensionStorageCallback) -> Result<Self> {
+ Ok(Self {
+ store: AtomicRefCell::new(Some(store)),
+ callback: ThreadPtrHolder::new(
+ cstr!("mozIExtensionStorageCallback"),
+ RefPtr::new(callback),
+ )?,
+ result: AtomicRefCell::new(Err(Error::DidNotRun(Self::name()))),
+ })
+ }
+
+ /// Returns the task name, used to label its runnable and report errors.
+ pub fn name() -> &'static str {
+ "webext_storage::teardown"
+ }
+
+ /// Tears down and drops the store on the background queue.
+ fn inner_run(&self, store: Arc<LazyStore>) -> Result<()> {
+ // At this point, we should be holding the only strong reference
+ // to the store, since 1) `StorageSyncArea` gave its one strong
+ // reference to our task, and 2) we're running on a background
+ // task queue, which runs all tasks sequentially...so no other
+ // `PuntTask`s should be running and trying to upgrade their
+ // weak references. So we can unwrap the `Arc` and take ownership
+ // of the store.
+ match Arc::try_unwrap(store) {
+ Ok(store) => store.teardown(),
+ Err(_) => {
+ // If unwrapping the `Arc` fails, someone else must have
+ // a strong reference to the store. We could sleep and
+ // try again, but this is so unexpected that it's easier
+ // to just leak the store, and return an error to the
+ // callback. Except in tests, we only call `teardown` at
+ // shutdown, so the resources will get reclaimed soon,
+ // anyway.
+ Err(Error::DidNotRun(Self::name()))
+ }
+ }
+ }
+}
+
+impl Task for TeardownTask {
+ fn run(&self) {
+ *self.result.borrow_mut() = match self.store.borrow_mut().take() {
+ Some(store) => self.inner_run(store),
+ None => Err(Error::AlreadyRan(Self::name())),
+ };
+ }
+
+ fn done(&self) -> result::Result<(), nsresult> {
+ let callback = self.callback.get().unwrap();
+ match mem::replace(
+ &mut *self.result.borrow_mut(),
+ Err(Error::AlreadyRan(Self::name())),
+ ) {
+ Ok(()) => unsafe { callback.HandleSuccess(().into_variant().coerce()) },
+ Err(err) => {
+ let mut message = nsCString::new();
+ write!(message, "{err}").unwrap();
+ unsafe { callback.HandleError(err.into(), &*message) }
+ }
+ }
+ .to_result()
+ }
+}
diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/src/store.rs b/toolkit/components/extensions/storage/webext_storage_bridge/src/store.rs
new file mode 100644
index 0000000000..79189f4761
--- /dev/null
+++ b/toolkit/components/extensions/storage/webext_storage_bridge/src/store.rs
@@ -0,0 +1,136 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use std::{fs::remove_file, path::PathBuf, sync::Arc};
+
+use interrupt_support::SqlInterruptHandle;
+use once_cell::sync::OnceCell;
+use webext_storage::store::Store;
+
+use crate::error::{self, Error};
+
+/// Options for an extension storage area.
+pub struct LazyStoreConfig {
+ /// The path to the database file for this storage area.
+ pub path: PathBuf,
+ /// The path to the old kinto database. If it exists, we should attempt to
+ /// migrate from this database as soon as we open our DB. It's not Option<>
+ /// because the caller will not have checked whether it exists or not, so
+ /// will assume it might.
+ pub kinto_path: PathBuf,
+}
+
+/// A lazy store is automatically initialized on a background thread with its
+/// configuration the first time it's used.
+#[derive(Default)]
+pub struct LazyStore {
+ store: OnceCell<InterruptStore>,
+ config: OnceCell<LazyStoreConfig>,
+}
+
+/// An `InterruptStore` wraps an inner extension store, and its interrupt
+/// handle.
+struct InterruptStore {
+ inner: Store,
+ handle: Arc<SqlInterruptHandle>,
+}
+
+impl LazyStore {
+ /// Configures the lazy store. Returns an error if the store has already
+ /// been configured. This method should be called from the main thread.
+ pub fn configure(&self, config: LazyStoreConfig) -> error::Result<()> {
+ self.config
+ .set(config)
+ .map_err(|_| Error::AlreadyConfigured)
+ }
+
+ /// Interrupts all pending operations on the store. If a database statement
+ /// is currently running, this will interrupt that statement. If the
+ /// statement is a write inside an active transaction, the entire
+ /// transaction will be rolled back. This method should be called from the
+ /// main thread.
+ pub fn interrupt(&self) {
+ if let Some(outer) = self.store.get() {
+ outer.handle.interrupt();
+ }
+ }
+
+ /// Returns the underlying store, initializing it if needed. This method
+ /// should only be called from a background thread or task queue, since
+ /// opening the database does I/O.
+ pub fn get(&self) -> error::Result<&Store> {
+ Ok(&self
+ .store
+ .get_or_try_init(|| match self.config.get() {
+ Some(config) => {
+ let store = init_store(config)?;
+ let handle = store.interrupt_handle();
+ Ok(InterruptStore {
+ inner: store,
+ handle,
+ })
+ }
+ None => Err(Error::NotConfigured),
+ })?
+ .inner)
+ }
+
+ /// Tears down the store. If the store wasn't initialized, this is a no-op.
+ /// This should only be called from a background thread or task queue,
+ /// because closing the database also does I/O.
+ pub fn teardown(self) -> error::Result<()> {
+ if let Some(store) = self.store.into_inner() {
+ store.inner.close()?;
+ }
+ Ok(())
+ }
+}
+
+// Initialize the store, performing a migration if necessary.
+// The requirements for migration are, roughly:
+// * If kinto_path doesn't exist, we don't try to migrate.
+// * If our DB path exists, we assume we've already migrated and don't try again
+// * If the migration fails, we close our store and delete the DB, then return
+//   a special error code which tells our caller about the failure. It's then
+//   expected to fall back to the "old" kinto store, and we'll try again next time.
+// Note that the migrate() method on the store is written such that it should
+// ignore all "read" errors from the source but propagate "write" errors on our
+// DB - the intention is that things like corrupted source databases never cause
+// the migration to fail, but disk-space failures on our database do.
+fn init_store(config: &LazyStoreConfig) -> error::Result<Store> {
+ let should_migrate = config.kinto_path.exists() && !config.path.exists();
+ let store = Store::new(&config.path)?;
+ if should_migrate {
+ match store.migrate(&config.kinto_path) {
+            // It's likely to be too early for us to stick the MigrationInfo
+            // into the sync telemetry, so a separate call to `take_migration_info`
+            // must be made to the store (this is done by telemetry after it's
+            // ready to submit the data).
+ Ok(()) => {
+ // need logging, but for now let's print to stdout.
+ println!("extension-storage: migration complete");
+ Ok(store)
+ }
+ Err(e) => {
+ println!("extension-storage: migration failure: {e}");
+ if let Err(e) = store.close() {
+ // welp, this probably isn't going to end well...
+ println!(
+ "extension-storage: failed to close the store after migration failure: {e}"
+ );
+ }
+ if let Err(e) = remove_file(&config.path) {
+                    // This is bad - if it happens regularly it will defeat
+                    // our entire migration strategy, because next time we'll
+                    // see our DB exists and assume the migration worked.
+                    // So it's desirable to make noise if this happens.
+ println!("Failed to remove file after failed migration: {e}");
+ }
+ Err(Error::MigrationFailed(e))
+ }
+ }
+ } else {
+ Ok(store)
+ }
+}