diff options
Diffstat (limited to '')
52 files changed, 14578 insertions, 0 deletions
diff --git a/services/common/app_services_logger/AppServicesLoggerComponents.h b/services/common/app_services_logger/AppServicesLoggerComponents.h new file mode 100644 index 0000000000..015eae230b --- /dev/null +++ b/services/common/app_services_logger/AppServicesLoggerComponents.h @@ -0,0 +1,38 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef mozilla_services_AppServicesLoggerComponents_h_ +#define mozilla_services_AppServicesLoggerComponents_h_ + +#include "mozIAppServicesLogger.h" +#include "nsCOMPtr.h" + +extern "C" { + +// Implemented in Rust, in the `app_services_logger` crate. +nsresult NS_NewAppServicesLogger(mozIAppServicesLogger** aResult); + +} // extern "C" + +namespace mozilla { +namespace appservices { + +// The C++ constructor for a `services.appServicesLogger` service. This wrapper +// exists because `components.conf` requires a component class constructor to +// return an `already_AddRefed<T>`, but Rust doesn't have such a type. So we +// call the Rust constructor using a `nsCOMPtr` (which is compatible with Rust's +// `xpcom::RefPtr`) out param, and return that. 
+already_AddRefed<mozIAppServicesLogger> NewLogService() { + nsCOMPtr<mozIAppServicesLogger> logger; + nsresult rv = NS_NewAppServicesLogger(getter_AddRefs(logger)); + if (NS_WARN_IF(NS_FAILED(rv))) { + return nullptr; + } + return logger.forget(); +} + +} // namespace appservices +} // namespace mozilla + +#endif // mozilla_services_AppServicesLoggerComponents_h_ diff --git a/services/common/app_services_logger/Cargo.toml b/services/common/app_services_logger/Cargo.toml new file mode 100644 index 0000000000..d68c378845 --- /dev/null +++ b/services/common/app_services_logger/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "app_services_logger" +version = "0.1.0" +authors = ["lougeniac64 <lougeniaC64@users.noreply.github.com>"] +edition = "2018" +license = "MPL-2.0" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +cstr = "0.2" +golden_gate = { path = "../../../services/sync/golden_gate" } +log = "0.4" +once_cell = "1.4.0" +nserror = { path = "../../../xpcom/rust/nserror" } +nsstring = { path = "../../../xpcom/rust/nsstring" } +xpcom = { path = "../../../xpcom/rust/xpcom" } diff --git a/services/common/app_services_logger/components.conf b/services/common/app_services_logger/components.conf new file mode 100644 index 0000000000..cdea60a04b --- /dev/null +++ b/services/common/app_services_logger/components.conf @@ -0,0 +1,15 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +Classes = [ + { + 'cid': '{d2716568-f5fa-4989-91dd-e11599e932a1}', + 'contract_ids': ['@mozilla.org/appservices/logger;1'], + 'type': 'mozIAppServicesLogger', + 'headers': ['mozilla/appservices/AppServicesLoggerComponents.h'], + 'constructor': 'mozilla::appservices::NewLogService', + }, +] diff --git a/services/common/app_services_logger/src/lib.rs b/services/common/app_services_logger/src/lib.rs new file mode 100644 index 0000000000..92125b62c4 --- /dev/null +++ b/services/common/app_services_logger/src/lib.rs @@ -0,0 +1,128 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +//! This provides a XPCOM service to send app services logs to the desktop + +#[macro_use] +extern crate cstr; + +#[macro_use] +extern crate xpcom; + +use golden_gate::log::LogSink; +use nserror::{nsresult, NS_OK}; +use nsstring::nsAString; +use once_cell::sync::Lazy; +use std::os::raw::c_char; +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicBool, Ordering}, + RwLock, + }, +}; +use xpcom::{ + interfaces::{mozIAppServicesLogger, mozIServicesLogSink, nsIObserverService, nsISupports}, + RefPtr, +}; + +/// A flag that's set after we register our observer to clear the map of loggers +/// on shutdown. 
+static SHUTDOWN_OBSERVED: AtomicBool = AtomicBool::new(false); + +#[xpcom(implement(mozIAppServicesLogger), nonatomic)] +pub struct AppServicesLogger {} + +pub static LOGGERS_BY_TARGET: Lazy<RwLock<HashMap<String, LogSink>>> = Lazy::new(|| { + let h: HashMap<String, LogSink> = HashMap::new(); + let m = RwLock::new(h); + m +}); + +impl AppServicesLogger { + xpcom_method!(register => Register(target: *const nsAString, logger: *const mozIServicesLogSink)); + fn register(&self, target: &nsAString, logger: &mozIServicesLogSink) -> Result<(), nsresult> { + let log_sink_logger = LogSink::with_logger(Some(logger))?; + + ensure_observing_shutdown(); + + LOGGERS_BY_TARGET + .write() + .unwrap() + .insert(target.to_string(), log_sink_logger); + Ok(()) + } + + pub fn is_app_services_logger_registered(target: String) -> bool { + match LOGGERS_BY_TARGET.read() { + Ok(loggers_by_target) => loggers_by_target.contains_key(&target), + Err(_e) => false, + } + } +} + +// Import the `NS_IsMainThread` symbol from Gecko... +extern "C" { + fn NS_IsMainThread() -> bool; +} + +/// Registers an observer to clear the loggers map on `xpcom-shutdown`. This +/// function must be called from the main thread, because the observer service +//// is main thread-only. +fn ensure_observing_shutdown() { + assert!(unsafe { NS_IsMainThread() }); + // If we've already registered our observer, bail. Relaxed ordering is safe + // here and below, because we've asserted we're only called from the main + // thread, and only check the flag here. 
+ if SHUTDOWN_OBSERVED.load(Ordering::Relaxed) { + return; + } + if let Ok(service) = xpcom::components::Observer::service::<nsIObserverService>() { + let observer = ShutdownObserver::allocate(InitShutdownObserver {}); + let rv = unsafe { + service.AddObserver(observer.coerce(), cstr!("xpcom-shutdown").as_ptr(), false) + }; + // If we fail to register the observer now, or fail to get the observer + // service, the flag will remain `false`, and we'll try again on the + // next call to `ensure_observing_shutdown`. + SHUTDOWN_OBSERVED.store(rv.succeeded(), Ordering::Relaxed); + } +} + +#[xpcom(implement(nsIObserver), nonatomic)] +struct ShutdownObserver {} + +impl ShutdownObserver { + xpcom_method!(observe => Observe(_subject: *const nsISupports, topic: *const c_char, _data: *const u16)); + /// Remove our shutdown observer and clear the map. + fn observe( + &self, + _subject: &nsISupports, + topic: *const c_char, + _data: *const u16, + ) -> Result<(), nsresult> { + LOGGERS_BY_TARGET.write().unwrap().clear(); + if let Ok(service) = xpcom::components::Observer::service::<nsIObserverService>() { + // Ignore errors, since we're already shutting down. + let _ = unsafe { service.RemoveObserver(self.coerce(), topic) }; + } + Ok(()) + } +} + +/// The constructor for an `AppServicesLogger` service. This uses C linkage so that it +/// can be called from C++. See `AppServicesLoggerComponents.h` for the C++ +/// constructor that's passed to the component manager. +/// +/// # Safety +/// +/// This function is unsafe because it dereferences `result`. 
+#[no_mangle] +pub unsafe extern "C" fn NS_NewAppServicesLogger( + result: *mut *const mozIAppServicesLogger, +) -> nsresult { + let logger = AppServicesLogger::allocate(InitAppServicesLogger {}); + RefPtr::new(logger.coerce::<mozIAppServicesLogger>()).forget(&mut *result); + NS_OK +} diff --git a/services/common/async.sys.mjs b/services/common/async.sys.mjs new file mode 100644 index 0000000000..564b46a071 --- /dev/null +++ b/services/common/async.sys.mjs @@ -0,0 +1,301 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +const Timer = Components.Constructor("@mozilla.org/timer;1", "nsITimer"); + +/* + * Helpers for various async operations. + */ +export var Async = { + /** + * Execute an arbitrary number of asynchronous functions one after the + * other, passing the callback arguments on to the next one. All functions + * must take a callback function as their last argument. The 'this' object + * will be whatever chain()'s is. + * + * @usage this._chain = Async.chain; + * this._chain(this.foo, this.bar, this.baz)(args, for, foo) + * + * This is equivalent to: + * + * let self = this; + * self.foo(args, for, foo, function (bars, args) { + * self.bar(bars, args, function (baz, params) { + * self.baz(baz, params); + * }); + * }); + */ + chain: function chain(...funcs) { + let thisObj = this; + return function callback() { + if (funcs.length) { + let args = [...arguments, callback]; + let f = funcs.shift(); + f.apply(thisObj, args); + } + }; + }, + + /** + * Check if the app is still ready (not quitting). Returns true, or throws an + * exception if not ready. 
+ */ + checkAppReady: function checkAppReady() { + // Watch for app-quit notification to stop any sync calls + Services.obs.addObserver(function onQuitApplication() { + Services.obs.removeObserver(onQuitApplication, "quit-application"); + Async.checkAppReady = Async.promiseYield = function () { + let exception = Components.Exception( + "App. Quitting", + Cr.NS_ERROR_ABORT + ); + exception.appIsShuttingDown = true; + throw exception; + }; + }, "quit-application"); + // In the common case, checkAppReady just returns true + return (Async.checkAppReady = function () { + return true; + })(); + }, + + /** + * Check if the app is still ready (not quitting). Returns true if the app + * is ready, or false if it is being shut down. + */ + isAppReady() { + try { + return Async.checkAppReady(); + } catch (ex) { + if (!Async.isShutdownException(ex)) { + throw ex; + } + } + return false; + }, + + /** + * Check if the passed exception is one raised by checkAppReady. Typically + * this will be used in exception handlers to allow such exceptions to + * make their way to the top frame and allow the app to actually terminate. + */ + isShutdownException(exception) { + return exception && exception.appIsShuttingDown === true; + }, + + /** + * A "tight loop" of promises can still lock up the browser for some time. + * Periodically waiting for a promise returned by this function will solve + * that. + * You should probably not use this method directly and instead use jankYielder + * below. + * Some reference here: + * - https://gist.github.com/jesstelford/bbb30b983bddaa6e5fef2eb867d37678 + * - https://bugzilla.mozilla.org/show_bug.cgi?id=1094248 + */ + promiseYield() { + return new Promise(resolve => { + Services.tm.currentThread.dispatch(resolve, Ci.nsIThread.DISPATCH_NORMAL); + }); + }, + + /** + * Shared state for yielding every N calls. + * + * Can be passed to multiple Async.yieldingForEach to have them overall yield + * every N iterations. 
+ */ + yieldState(yieldEvery = 50) { + let iterations = 0; + + return { + shouldYield() { + ++iterations; + return iterations % yieldEvery === 0; + }, + }; + }, + + /** + * Apply the given function to each element of the iterable, yielding the + * event loop every yieldEvery iterations. + * + * @param iterable {Iterable} + * The iterable or iterator to iterate through. + * + * @param fn {(*) -> void|boolean} + * The function to be called on each element of the iterable. + * + * Returning true from the function will stop the iteration. + * + * @param [yieldEvery = 50] {number|object} + * The number of iterations to complete before yielding back to the event + * loop. + * + * @return {boolean} + * Whether or not the function returned early. + */ + async yieldingForEach(iterable, fn, yieldEvery = 50) { + const yieldState = + typeof yieldEvery === "number" + ? Async.yieldState(yieldEvery) + : yieldEvery; + let iteration = 0; + + for (const item of iterable) { + let result = fn(item, iteration++); + if (typeof result !== "undefined" && typeof result.then !== "undefined") { + // If we await result when it is not a Promise, we create an + // automatically resolved promise, which is exactly the case that we + // are trying to avoid. + result = await result; + } + + if (result === true) { + return true; + } + + if (yieldState.shouldYield()) { + await Async.promiseYield(); + Async.checkAppReady(); + } + } + + return false; + }, + + asyncQueueCaller(log) { + return new AsyncQueueCaller(log); + }, + + asyncObserver(log, obj) { + return new AsyncObserver(log, obj); + }, + + watchdog() { + return new Watchdog(); + }, +}; + +/** + * Allows consumers to enqueue asynchronous callbacks to be called in order. + * Typically this is used when providing a callback to a caller that doesn't + * await on promises. 
+ */ +class AsyncQueueCaller { + constructor(log) { + this._log = log; + this._queue = Promise.resolve(); + this.QueryInterface = ChromeUtils.generateQI([ + "nsIObserver", + "nsISupportsWeakReference", + ]); + } + + /** + * /!\ Never await on another function that calls enqueueCall /!\ + * on the same queue or we will deadlock. + */ + enqueueCall(func) { + this._queue = (async () => { + await this._queue; + try { + return await func(); + } catch (e) { + this._log.error(e); + return false; + } + })(); + } + + promiseCallsComplete() { + return this._queue; + } +} + +/* + * Subclass of AsyncQueueCaller that can be used with Services.obs directly. + * When this observe() is called, it will enqueue a call to the consumers's + * observe(). + */ +class AsyncObserver extends AsyncQueueCaller { + constructor(obj, log) { + super(log); + this.obj = obj; + } + + observe(subject, topic, data) { + this.enqueueCall(() => this.obj.observe(subject, topic, data)); + } + + promiseObserversComplete() { + return this.promiseCallsComplete(); + } +} + +/** + * Woof! Signals an operation to abort, either at shutdown or after a timeout. + * The buffered engine uses this to abort long-running merges, so that they + * don't prevent Firefox from quitting, or block future syncs. + */ +class Watchdog { + constructor() { + this.controller = new AbortController(); + this.timer = new Timer(); + + /** + * The reason for signaling an abort. `null` if not signaled, + * `"timeout"` if the watchdog timer fired, or `"shutdown"` if the app is + * is quitting. + * + * @type {String?} + */ + this.abortReason = null; + } + + /** + * Returns the abort signal for this watchdog. This can be passed to APIs + * that take a signal for cancellation, like `SyncedBookmarksMirror::apply` + * or `fetch`. + * + * @type {AbortSignal} + */ + get signal() { + return this.controller.signal; + } + + /** + * Starts the watchdog timer, and listens for the app quitting. 
+ * + * @param {Number} delay + * The time to wait before signaling the operation to abort. + */ + start(delay) { + if (!this.signal.aborted) { + Services.obs.addObserver(this, "quit-application"); + this.timer.init(this, delay, Ci.nsITimer.TYPE_ONE_SHOT); + } + } + + /** + * Stops the watchdog timer and removes any listeners. This should be called + * after the operation finishes. + */ + stop() { + if (!this.signal.aborted) { + Services.obs.removeObserver(this, "quit-application"); + this.timer.cancel(); + } + } + + observe(subject, topic, data) { + if (topic == "timer-callback") { + this.abortReason = "timeout"; + } else if (topic == "quit-application") { + this.abortReason = "shutdown"; + } + this.stop(); + this.controller.abort(); + } +} diff --git a/services/common/hawkclient.sys.mjs b/services/common/hawkclient.sys.mjs new file mode 100644 index 0000000000..05a3fa7336 --- /dev/null +++ b/services/common/hawkclient.sys.mjs @@ -0,0 +1,336 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * HAWK is an HTTP authentication scheme using a message authentication code + * (MAC) algorithm to provide partial HTTP request cryptographic verification. + * + * For details, see: https://github.com/hueniverse/hawk + * + * With HAWK, it is essential that the clocks on clients and server not have an + * absolute delta of greater than one minute, as the HAWK protocol uses + * timestamps to reduce the possibility of replay attacks. However, it is + * likely that some clients' clocks will be more than a little off, especially + * in mobile devices, which would break HAWK-based services (like sync and + * firefox accounts) for those clients. + * + * This library provides a stateful HAWK client that calculates (roughly) the + * clock delta on the client vs the server. 
The library provides an interface + * for deriving HAWK credentials and making HAWK-authenticated REST requests to + * a single remote server. Therefore, callers who want to interact with + * multiple HAWK services should instantiate one HawkClient per service. + */ + +import { HAWKAuthenticatedRESTRequest } from "resource://services-common/hawkrequest.sys.mjs"; + +import { Observers } from "resource://services-common/observers.sys.mjs"; +import { Log } from "resource://gre/modules/Log.sys.mjs"; + +// log.appender.dump should be one of "Fatal", "Error", "Warn", "Info", "Config", +// "Debug", "Trace" or "All". If none is specified, "Error" will be used by +// default. +// Note however that Sync will also add this log to *its* DumpAppender, so +// in a Sync context it shouldn't be necessary to adjust this - however, that +// also means error logs are likely to be dump'd twice but that's OK. +const PREF_LOG_LEVEL = "services.common.hawk.log.appender.dump"; + +// A pref that can be set so "sensitive" information (eg, personally +// identifiable info, credentials, etc) will be logged. +const PREF_LOG_SENSITIVE_DETAILS = "services.common.hawk.log.sensitive"; + +const lazy = {}; + +ChromeUtils.defineLazyGetter(lazy, "log", function () { + let log = Log.repository.getLogger("Hawk"); + // We set the log itself to "debug" and set the level from the preference to + // the appender. 
This allows other things to send the logs to different + // appenders, while still allowing the pref to control what is seen via dump() + log.level = Log.Level.Debug; + let appender = new Log.DumpAppender(); + log.addAppender(appender); + appender.level = Log.Level.Error; + try { + let level = + Services.prefs.getPrefType(PREF_LOG_LEVEL) == + Ci.nsIPrefBranch.PREF_STRING && + Services.prefs.getStringPref(PREF_LOG_LEVEL); + appender.level = Log.Level[level] || Log.Level.Error; + } catch (e) { + log.error(e); + } + + return log; +}); + +// A boolean to indicate if personally identifiable information (or anything +// else sensitive, such as credentials) should be logged. +ChromeUtils.defineLazyGetter(lazy, "logPII", function () { + try { + return Services.prefs.getBoolPref(PREF_LOG_SENSITIVE_DETAILS); + } catch (_) { + return false; + } +}); + +/* + * A general purpose client for making HAWK authenticated requests to a single + * host. Keeps track of the clock offset between the client and the host for + * computation of the timestamp in the HAWK Authorization header. + * + * Clients should create one HawkClient object per each server they wish to + * interact with. + * + * @param host + * The url of the host + */ +export var HawkClient = function (host) { + this.host = host; + + // Clock offset in milliseconds between our client's clock and the date + // reported in responses from our host. + this._localtimeOffsetMsec = 0; +}; + +HawkClient.prototype = { + /* + * Construct an error message for a response. Private. 
+ * + * @param restResponse + * A RESTResponse object from a RESTRequest + * + * @param error + * A string or object describing the error + */ + _constructError(restResponse, error) { + let errorObj = { + error, + // This object is likely to be JSON.stringify'd, but neither Error() + // objects nor Components.Exception objects do the right thing there, + // so we add a new element which is simply the .toString() version of + // the error object, so it does appear in JSON'd values. + errorString: error.toString(), + message: restResponse.statusText, + code: restResponse.status, + errno: restResponse.status, + toString() { + return this.code + ": " + this.message; + }, + }; + let retryAfter = + restResponse.headers && restResponse.headers["retry-after"]; + retryAfter = retryAfter ? parseInt(retryAfter) : retryAfter; + if (retryAfter) { + errorObj.retryAfter = retryAfter; + // and notify observers of the retry interval + if (this.observerPrefix) { + Observers.notify(this.observerPrefix + ":backoff:interval", retryAfter); + } + } + return errorObj; + }, + + /* + * + * Update clock offset by determining difference from date gives in the (RFC + * 1123) Date header of a server response. Because HAWK tolerates a window + * of one minute of clock skew (so two minutes total since the skew can be + * positive or negative), the simple method of calculating offset here is + * probably good enough. We keep the value in milliseconds to make life + * easier, even though the value will not have millisecond accuracy. 
+ * + * @param dateString + * An RFC 1123 date string (e.g., "Mon, 13 Jan 2014 21:45:06 GMT") + * + * For HAWK clock skew and replay protection, see + * https://github.com/hueniverse/hawk#replay-protection + */ + _updateClockOffset(dateString) { + try { + let serverDateMsec = Date.parse(dateString); + this._localtimeOffsetMsec = serverDateMsec - this.now(); + lazy.log.debug( + "Clock offset vs " + this.host + ": " + this._localtimeOffsetMsec + ); + } catch (err) { + lazy.log.warn("Bad date header in server response: " + dateString); + } + }, + + /* + * Get the current clock offset in milliseconds. + * + * The offset is the number of milliseconds that must be added to the client + * clock to make it equal to the server clock. For example, if the client is + * five minutes ahead of the server, the localtimeOffsetMsec will be -300000. + */ + get localtimeOffsetMsec() { + return this._localtimeOffsetMsec; + }, + + /* + * return current time in milliseconds + */ + now() { + return Date.now(); + }, + + /* A general method for sending raw RESTRequest calls authorized using HAWK + * + * @param path + * API endpoint path + * @param method + * The HTTP request method + * @param credentials + * Hawk credentials + * @param payloadObj + * An object that can be encodable as JSON as the payload of the + * request + * @param extraHeaders + * An object with header/value pairs to send with the request. + * @return Promise + * Returns a promise that resolves to the response of the API call, + * or is rejected with an error. If the server response can be parsed + * as JSON and contains an 'error' property, the promise will be + * rejected with this JSON-parsed response. 
+ */ + async request( + path, + method, + credentials = null, + payloadObj = {}, + extraHeaders = {}, + retryOK = true + ) { + method = method.toLowerCase(); + + let uri = this.host + path; + + let extra = { + now: this.now(), + localtimeOffsetMsec: this.localtimeOffsetMsec, + headers: extraHeaders, + }; + + let request = this.newHAWKAuthenticatedRESTRequest(uri, credentials, extra); + let error; + let restResponse = await request[method](payloadObj).catch(e => { + // Keep a reference to the error, log a message about it, and return the + // response anyway. + error = e; + lazy.log.warn("hawk request error", error); + return request.response; + }); + + // This shouldn't happen anymore, but it's not exactly difficult to handle. + if (!restResponse) { + throw error; + } + + let status = restResponse.status; + + lazy.log.debug( + "(Response) " + + path + + ": code: " + + status + + " - Status text: " + + restResponse.statusText + ); + if (lazy.logPII) { + lazy.log.debug("Response text", restResponse.body); + } + + // All responses may have backoff headers, which are a server-side safety + // valve to allow slowing down clients without hurting performance. + this._maybeNotifyBackoff(restResponse, "x-weave-backoff"); + this._maybeNotifyBackoff(restResponse, "x-backoff"); + + if (error) { + // When things really blow up, reconstruct an error object that follows + // the general format of the server on error responses. + throw this._constructError(restResponse, error); + } + + this._updateClockOffset(restResponse.headers.date); + + if (status === 401 && retryOK && !("retry-after" in restResponse.headers)) { + // Retry once if we were rejected due to a bad timestamp. + // Clock offset is adjusted already in the top of this function. 
+ lazy.log.debug("Received 401 for " + path + ": retrying"); + return this.request( + path, + method, + credentials, + payloadObj, + extraHeaders, + false + ); + } + + // If the server returned a json error message, use it in the rejection + // of the promise. + // + // In the case of a 401, in which we are probably being rejected for a + // bad timestamp, retry exactly once, during which time clock offset will + // be adjusted. + + let jsonResponse = {}; + try { + jsonResponse = JSON.parse(restResponse.body); + } catch (notJSON) {} + + let okResponse = 200 <= status && status < 300; + if (!okResponse || jsonResponse.error) { + if (jsonResponse.error) { + throw jsonResponse; + } + throw this._constructError(restResponse, "Request failed"); + } + + // It's up to the caller to know how to decode the response. + // We just return the whole response. + return restResponse; + }, + + /* + * The prefix used for all notifications sent by this module. This + * allows the handler of notifications to be sure they are handling + * notifications for the service they expect. + * + * If not set, no notifications will be sent. + */ + observerPrefix: null, + + // Given an optional header value, notify that a backoff has been requested. + _maybeNotifyBackoff(response, headerName) { + if (!this.observerPrefix || !response.headers) { + return; + } + let headerVal = response.headers[headerName]; + if (!headerVal) { + return; + } + let backoffInterval; + try { + backoffInterval = parseInt(headerVal, 10); + } catch (ex) { + lazy.log.error( + "hawkclient response had invalid backoff value in '" + + headerName + + "' header: " + + headerVal + ); + return; + } + Observers.notify( + this.observerPrefix + ":backoff:interval", + backoffInterval + ); + }, + + // override points for testing. 
+ newHAWKAuthenticatedRESTRequest(uri, credentials, extra) { + return new HAWKAuthenticatedRESTRequest(uri, credentials, extra); + }, +}; diff --git a/services/common/hawkrequest.sys.mjs b/services/common/hawkrequest.sys.mjs new file mode 100644 index 0000000000..a856ef032d --- /dev/null +++ b/services/common/hawkrequest.sys.mjs @@ -0,0 +1,197 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { Log } from "resource://gre/modules/Log.sys.mjs"; + +import { RESTRequest } from "resource://services-common/rest.sys.mjs"; +import { CommonUtils } from "resource://services-common/utils.sys.mjs"; +import { Credentials } from "resource://gre/modules/Credentials.sys.mjs"; + +const lazy = {}; + +ChromeUtils.defineESModuleGetters(lazy, { + CryptoUtils: "resource://services-crypto/utils.sys.mjs", +}); + +/** + * Single-use HAWK-authenticated HTTP requests to RESTish resources. + * + * @param uri + * (String) URI for the RESTRequest constructor + * + * @param credentials + * (Object) Optional credentials for computing HAWK authentication + * header. + * + * @param payloadObj + * (Object) Optional object to be converted to JSON payload + * + * @param extra + * (Object) Optional extra params for HAWK header computation. + * Valid properties are: + * + * now: <current time in milliseconds>, + * localtimeOffsetMsec: <local clock offset vs server>, + * headers: <An object with header/value pairs to be sent + * as headers on the request> + * + * extra.localtimeOffsetMsec is the value in milliseconds that must be added to + * the local clock to make it agree with the server's clock. For instance, if + * the local clock is two minutes ahead of the server, the time offset in + * milliseconds will be -120000. 
+ */ + +export var HAWKAuthenticatedRESTRequest = function HawkAuthenticatedRESTRequest( + uri, + credentials, + extra = {} +) { + RESTRequest.call(this, uri); + + this.credentials = credentials; + this.now = extra.now || Date.now(); + this.localtimeOffsetMsec = extra.localtimeOffsetMsec || 0; + this._log.trace( + "local time, offset: " + this.now + ", " + this.localtimeOffsetMsec + ); + this.extraHeaders = extra.headers || {}; + + // Expose for testing + this._intl = getIntl(); +}; + +HAWKAuthenticatedRESTRequest.prototype = { + async dispatch(method, data) { + let contentType = "text/plain"; + if (method == "POST" || method == "PUT" || method == "PATCH") { + contentType = "application/json"; + } + if (this.credentials) { + let options = { + now: this.now, + localtimeOffsetMsec: this.localtimeOffsetMsec, + credentials: this.credentials, + payload: (data && JSON.stringify(data)) || "", + contentType, + }; + let header = await lazy.CryptoUtils.computeHAWK( + this.uri, + method, + options + ); + this.setHeader("Authorization", header.field); + } + + for (let header in this.extraHeaders) { + this.setHeader(header, this.extraHeaders[header]); + } + + this.setHeader("Content-Type", contentType); + + this.setHeader("Accept-Language", this._intl.accept_languages); + + return super.dispatch(method, data); + }, +}; + +Object.setPrototypeOf( + HAWKAuthenticatedRESTRequest.prototype, + RESTRequest.prototype +); + +/** + * Generic function to derive Hawk credentials. + * + * Hawk credentials are derived using shared secrets, which depend on the token + * in use. + * + * @param tokenHex + * The current session token encoded in hex + * @param context + * A context for the credentials. A protocol version will be prepended + * to the context, see Credentials.keyWord for more information. + * @param size + * The size in bytes of the expected derived buffer, + * defaults to 3 * 32. 
+ * @return credentials + * Returns an object: + * { + * id: the Hawk id (from the first 32 bytes derived) + * key: the Hawk key (from bytes 32 to 64) + * extra: size - 64 extra bytes (if size > 64) + * } + */ +export async function deriveHawkCredentials(tokenHex, context, size = 96) { + let token = CommonUtils.hexToBytes(tokenHex); + let out = await lazy.CryptoUtils.hkdfLegacy( + token, + undefined, + Credentials.keyWord(context), + size + ); + + let result = { + key: out.slice(32, 64), + id: CommonUtils.bytesAsHex(out.slice(0, 32)), + }; + if (size > 64) { + result.extra = out.slice(64); + } + + return result; +} + +// With hawk request, we send the user's accepted-languages with each request. +// To keep the number of times we read this pref at a minimum, maintain the +// preference in a stateful object that notices and updates itself when the +// pref is changed. +function Intl() { + // We won't actually query the pref until the first time we need it + this._accepted = ""; + this._everRead = false; + this.init(); +} + +Intl.prototype = { + init() { + Services.prefs.addObserver("intl.accept_languages", this); + }, + + uninit() { + Services.prefs.removeObserver("intl.accept_languages", this); + }, + + observe(subject, topic, data) { + this.readPref(); + }, + + readPref() { + this._everRead = true; + try { + this._accepted = Services.prefs.getComplexValue( + "intl.accept_languages", + Ci.nsIPrefLocalizedString + ).data; + } catch (err) { + let log = Log.repository.getLogger("Services.Common.RESTRequest"); + log.error("Error reading intl.accept_languages pref", err); + } + }, + + get accept_languages() { + if (!this._everRead) { + this.readPref(); + } + return this._accepted; + }, +}; + +// Singleton getter for Intl, creating an instance only when we first need it. 
+var intl = null; +function getIntl() { + if (!intl) { + intl = new Intl(); + } + return intl; +} diff --git a/services/common/kinto-http-client.sys.mjs b/services/common/kinto-http-client.sys.mjs new file mode 100644 index 0000000000..6ccaa520c1 --- /dev/null +++ b/services/common/kinto-http-client.sys.mjs @@ -0,0 +1,3061 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * This file is generated from kinto.js - do not modify directly. + */ + +import { setTimeout, clearTimeout } from "resource://gre/modules/Timer.sys.mjs"; + +/* + * Version 15.0.0 - c8775d9 + */ + +import { EventEmitter } from "resource://gre/modules/EventEmitter.sys.mjs"; + +/****************************************************************************** +Copyright (c) Microsoft Corporation. + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. 
+***************************************************************************** */ +/* global Reflect, Promise */ + +function __decorate(decorators, target, key, desc) { + var c = arguments.length, + r = + c < 3 + ? target + : desc === null + ? (desc = Object.getOwnPropertyDescriptor(target, key)) + : desc, + d; + if (typeof Reflect === "object" && typeof Reflect.decorate === "function") + r = Reflect.decorate(decorators, target, key, desc); + else + for (var i = decorators.length - 1; i >= 0; i--) + if ((d = decorators[i])) + r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; + return c > 3 && r && Object.defineProperty(target, key, r), r; +} + +/** + * Chunks an array into n pieces. + * + * @private + * @param {Array} array + * @param {Number} n + * @return {Array} + */ +function partition(array, n) { + if (n <= 0) { + return [array]; + } + return array.reduce((acc, x, i) => { + if (i === 0 || i % n === 0) { + acc.push([x]); + } else { + acc[acc.length - 1].push(x); + } + return acc; + }, []); +} +/** + * Returns a Promise always resolving after the specified amount in milliseconds. + * + * @return Promise<void> + */ +function delay(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); +} +/** + * Always returns a resource data object from the provided argument. + * + * @private + * @param {Object|String} resource + * @return {Object} + */ +function toDataBody(resource) { + if (isObject(resource)) { + return resource; + } + if (typeof resource === "string") { + return { id: resource }; + } + throw new Error("Invalid argument."); +} +/** + * Transforms an object into an URL query string, stripping out any undefined + * values. + * + * @param {Object} obj + * @return {String} + */ +function qsify(obj) { + const encode = v => + encodeURIComponent(typeof v === "boolean" ? 
String(v) : v); + const stripped = cleanUndefinedProperties(obj); + return Object.keys(stripped) + .map(k => { + const ks = encode(k) + "="; + if (Array.isArray(stripped[k])) { + return ks + stripped[k].map(v => encode(v)).join(","); + } + return ks + encode(stripped[k]); + }) + .join("&"); +} +/** + * Checks if a version is within the provided range. + * + * @param {String} version The version to check. + * @param {String} minVersion The minimum supported version (inclusive). + * @param {String} maxVersion The minimum supported version (exclusive). + * @throws {Error} If the version is outside of the provided range. + */ +function checkVersion(version, minVersion, maxVersion) { + const extract = str => str.split(".").map(x => parseInt(x, 10)); + const [verMajor, verMinor] = extract(version); + const [minMajor, minMinor] = extract(minVersion); + const [maxMajor, maxMinor] = extract(maxVersion); + const checks = [ + verMajor < minMajor, + verMajor === minMajor && verMinor < minMinor, + verMajor > maxMajor, + verMajor === maxMajor && verMinor >= maxMinor, + ]; + if (checks.some(x => x)) { + throw new Error( + `Version ${version} doesn't satisfy ${minVersion} <= x < ${maxVersion}` + ); + } +} +/** + * Generates a decorator function ensuring a version check is performed against + * the provided requirements before executing it. + * + * @param {String} min The required min version (inclusive). + * @param {String} max The required max version (inclusive). + * @return {Function} + */ +function support(min, max) { + return function ( + // @ts-ignore + target, + key, + descriptor + ) { + const fn = descriptor.value; + return { + configurable: true, + get() { + const wrappedMethod = (...args) => { + // "this" is the current instance which its method is decorated. + const client = this.client ? 
this.client : this; + return client + .fetchHTTPApiVersion() + .then(version => checkVersion(version, min, max)) + .then(() => fn.apply(this, args)); + }; + Object.defineProperty(this, key, { + value: wrappedMethod, + configurable: true, + writable: true, + }); + return wrappedMethod; + }, + }; + }; +} +/** + * Generates a decorator function ensuring that the specified capabilities are + * available on the server before executing it. + * + * @param {Array<String>} capabilities The required capabilities. + * @return {Function} + */ +function capable(capabilities) { + return function ( + // @ts-ignore + target, + key, + descriptor + ) { + const fn = descriptor.value; + return { + configurable: true, + get() { + const wrappedMethod = (...args) => { + // "this" is the current instance which its method is decorated. + const client = this.client ? this.client : this; + return client + .fetchServerCapabilities() + .then(available => { + const missing = capabilities.filter(c => !(c in available)); + if (missing.length) { + const missingStr = missing.join(", "); + throw new Error( + `Required capabilities ${missingStr} not present on server` + ); + } + }) + .then(() => fn.apply(this, args)); + }; + Object.defineProperty(this, key, { + value: wrappedMethod, + configurable: true, + writable: true, + }); + return wrappedMethod; + }, + }; + }; +} +/** + * Generates a decorator function ensuring an operation is not performed from + * within a batch request. + * + * @param {String} message The error message to throw. + * @return {Function} + */ +function nobatch(message) { + return function ( + // @ts-ignore + target, + key, + descriptor + ) { + const fn = descriptor.value; + return { + configurable: true, + get() { + const wrappedMethod = (...args) => { + // "this" is the current instance which its method is decorated. 
+ if (this._isBatch) { + throw new Error(message); + } + return fn.apply(this, args); + }; + Object.defineProperty(this, key, { + value: wrappedMethod, + configurable: true, + writable: true, + }); + return wrappedMethod; + }, + }; + }; +} +/** + * Returns true if the specified value is an object (i.e. not an array nor null). + * @param {Object} thing The value to inspect. + * @return {bool} + */ +function isObject(thing) { + return typeof thing === "object" && thing !== null && !Array.isArray(thing); +} +/** + * Parses a data url. + * @param {String} dataURL The data url. + * @return {Object} + */ +function parseDataURL(dataURL) { + const regex = /^data:(.*);base64,(.*)/; + const match = dataURL.match(regex); + if (!match) { + throw new Error(`Invalid data-url: ${String(dataURL).substring(0, 32)}...`); + } + const props = match[1]; + const base64 = match[2]; + const [type, ...rawParams] = props.split(";"); + const params = rawParams.reduce((acc, param) => { + const [key, value] = param.split("="); + return { ...acc, [key]: value }; + }, {}); + return { ...params, type, base64 }; +} +/** + * Extracts file information from a data url. + * @param {String} dataURL The data url. + * @return {Object} + */ +function extractFileInfo(dataURL) { + const { name, type, base64 } = parseDataURL(dataURL); + const binary = atob(base64); + const array = []; + for (let i = 0; i < binary.length; i++) { + array.push(binary.charCodeAt(i)); + } + const blob = new Blob([new Uint8Array(array)], { type }); + return { blob, name }; +} +/** + * Creates a FormData instance from a data url and an existing JSON response + * body. + * @param {String} dataURL The data url. + * @param {Object} body The response body. + * @param {Object} [options={}] The options object. + * @param {Object} [options.filename] Force attachment file name. 
+ * @return {FormData} + */ +function createFormData(dataURL, body, options = {}) { + const { filename = "untitled" } = options; + const { blob, name } = extractFileInfo(dataURL); + const formData = new FormData(); + formData.append("attachment", blob, name || filename); + for (const property in body) { + if (typeof body[property] !== "undefined") { + formData.append(property, JSON.stringify(body[property])); + } + } + return formData; +} +/** + * Clones an object with all its undefined keys removed. + * @private + */ +function cleanUndefinedProperties(obj) { + const result = {}; + for (const key in obj) { + if (typeof obj[key] !== "undefined") { + result[key] = obj[key]; + } + } + return result; +} +/** + * Handle common query parameters for Kinto requests. + * + * @param {String} [path] The endpoint base path. + * @param {Array} [options.fields] Fields to limit the + * request to. + * @param {Object} [options.query={}] Additional query arguments. + */ +function addEndpointOptions(path, options = {}) { + const query = { ...options.query }; + if (options.fields) { + query._fields = options.fields; + } + const queryString = qsify(query); + if (queryString) { + return path + "?" + queryString; + } + return path; +} +/** + * Replace authorization header with an obscured version + */ +function obscureAuthorizationHeader(headers) { + const h = new Headers(headers); + if (h.has("authorization")) { + h.set("authorization", "**** (suppressed)"); + } + const obscuredHeaders = {}; + for (const [header, value] of h.entries()) { + obscuredHeaders[header] = value; + } + return obscuredHeaders; +} + +/** + * Kinto server error code descriptors. 
+ */ +const ERROR_CODES = { + 104: "Missing Authorization Token", + 105: "Invalid Authorization Token", + 106: "Request body was not valid JSON", + 107: "Invalid request parameter", + 108: "Missing request parameter", + 109: "Invalid posted data", + 110: "Invalid Token / id", + 111: "Missing Token / id", + 112: "Content-Length header was not provided", + 113: "Request body too large", + 114: "Resource was created, updated or deleted meanwhile", + 115: "Method not allowed on this end point (hint: server may be readonly)", + 116: "Requested version not available on this server", + 117: "Client has sent too many requests", + 121: "Resource access is forbidden for this user", + 122: "Another resource violates constraint", + 201: "Service Temporary unavailable due to high load", + 202: "Service deprecated", + 999: "Internal Server Error", +}; +class NetworkTimeoutError extends Error { + constructor(url, options) { + super( + `Timeout while trying to access ${url} with ${JSON.stringify(options)}` + ); + if (Error.captureStackTrace) { + Error.captureStackTrace(this, NetworkTimeoutError); + } + this.url = url; + this.options = options; + } +} +class UnparseableResponseError extends Error { + constructor(response, body, error) { + const { status } = response; + super( + `Response from server unparseable (HTTP ${ + status || 0 + }; ${error}): ${body}` + ); + if (Error.captureStackTrace) { + Error.captureStackTrace(this, UnparseableResponseError); + } + this.status = status; + this.response = response; + this.stack = error.stack; + this.error = error; + } +} +/** + * "Error" subclass representing a >=400 response from the server. + * + * Whether or not this is an error depends on your application. + * + * The `json` field can be undefined if the server responded with an + * empty response body. This shouldn't generally happen. 
Most "bad" + * responses come with a JSON error description, or (if they're + * fronted by a CDN or nginx or something) occasionally non-JSON + * responses (which become UnparseableResponseErrors, above). + */ +class ServerResponse extends Error { + constructor(response, json) { + const { status } = response; + let { statusText } = response; + let errnoMsg; + if (json) { + // Try to fill in information from the JSON error. + statusText = json.error || statusText; + // Take errnoMsg from either ERROR_CODES or json.message. + if (json.errno && json.errno in ERROR_CODES) { + errnoMsg = ERROR_CODES[json.errno]; + } else if (json.message) { + errnoMsg = json.message; + } + // If we had both ERROR_CODES and json.message, and they differ, + // combine them. + if (errnoMsg && json.message && json.message !== errnoMsg) { + errnoMsg += ` (${json.message})`; + } + } + let message = `HTTP ${status} ${statusText}`; + if (errnoMsg) { + message += `: ${errnoMsg}`; + } + super(message.trim()); + if (Error.captureStackTrace) { + Error.captureStackTrace(this, ServerResponse); + } + this.response = response; + this.data = json; + } +} + +var errors = /*#__PURE__*/ Object.freeze({ + __proto__: null, + NetworkTimeoutError, + ServerResponse, + UnparseableResponseError, + default: ERROR_CODES, +}); + +/** + * Enhanced HTTP client for the Kinto protocol. + * @private + */ +class HTTP { + /** + * Default HTTP request headers applied to each outgoing request. + * + * @type {Object} + */ + static get DEFAULT_REQUEST_HEADERS() { + return { + Accept: "application/json", + "Content-Type": "application/json", + }; + } + /** + * Default options. + * + * @type {Object} + */ + static get defaultOptions() { + return { timeout: null, requestMode: "cors" }; + } + /** + * Constructor. + * + * @param {EventEmitter} events The event handler. + * @param {Object} [options={}} The options object. + * @param {Number} [options.timeout=null] The request timeout in ms, if any (default: `null`). 
+ * @param {String} [options.requestMode="cors"] The HTTP request mode (default: `"cors"`). + */ + constructor(events, options = {}) { + // public properties + /** + * The event emitter instance. + * @type {EventEmitter} + */ + this.events = events; + /** + * The request mode. + * @see https://fetch.spec.whatwg.org/#requestmode + * @type {String} + */ + this.requestMode = options.requestMode || HTTP.defaultOptions.requestMode; + /** + * The request timeout. + * @type {Number} + */ + this.timeout = options.timeout || HTTP.defaultOptions.timeout; + /** + * The fetch() function. + * @type {Function} + */ + this.fetchFunc = options.fetchFunc || globalThis.fetch.bind(globalThis); + } + /** + * @private + */ + timedFetch(url, options) { + let hasTimedout = false; + return new Promise((resolve, reject) => { + // Detect if a request has timed out. + let _timeoutId; + if (this.timeout) { + _timeoutId = setTimeout(() => { + hasTimedout = true; + if (options && options.headers) { + options = { + ...options, + headers: obscureAuthorizationHeader(options.headers), + }; + } + reject(new NetworkTimeoutError(url, options)); + }, this.timeout); + } + function proceedWithHandler(fn) { + return arg => { + if (!hasTimedout) { + if (_timeoutId) { + clearTimeout(_timeoutId); + } + fn(arg); + } + }; + } + this.fetchFunc(url, options) + .then(proceedWithHandler(resolve)) + .catch(proceedWithHandler(reject)); + }); + } + /** + * @private + */ + async processResponse(response) { + const { status, headers } = response; + const text = await response.text(); + // Check if we have a body; if so parse it as JSON. 
+ let json; + if (text.length !== 0) { + try { + json = JSON.parse(text); + } catch (err) { + throw new UnparseableResponseError(response, text, err); + } + } + if (status >= 400) { + throw new ServerResponse(response, json); + } + return { status, json: json, headers }; + } + /** + * @private + */ + async retry(url, retryAfter, request, options) { + await delay(retryAfter); + return this.request(url, request, { + ...options, + retry: options.retry - 1, + }); + } + /** + * Performs an HTTP request to the Kinto server. + * + * Resolves with an objet containing the following HTTP response properties: + * - `{Number} status` The HTTP status code. + * - `{Object} json` The JSON response body. + * - `{Headers} headers` The response headers object; see the ES6 fetch() spec. + * + * @param {String} url The URL. + * @param {Object} [request={}] The request object, passed to + * fetch() as its options object. + * @param {Object} [request.headers] The request headers object (default: {}) + * @param {Object} [options={}] Options for making the + * request + * @param {Number} [options.retry] Number of retries (default: 0) + * @return {Promise} + */ + async request(url, request = { headers: {} }, options = { retry: 0 }) { + // Ensure default request headers are always set + request.headers = { ...HTTP.DEFAULT_REQUEST_HEADERS, ...request.headers }; + // If a multipart body is provided, remove any custom Content-Type header as + // the fetch() implementation will add the correct one for us. 
+ if (request.body && request.body instanceof FormData) { + if (request.headers instanceof Headers) { + request.headers.delete("Content-Type"); + } else if (!Array.isArray(request.headers)) { + delete request.headers["Content-Type"]; + } + } + request.mode = this.requestMode; + const response = await this.timedFetch(url, request); + const { headers } = response; + this._checkForDeprecationHeader(headers); + this._checkForBackoffHeader(headers); + // Check if the server summons the client to retry after a while. + const retryAfter = this._checkForRetryAfterHeader(headers); + // If number of allowed of retries is not exhausted, retry the same request. + if (retryAfter && options.retry > 0) { + return this.retry(url, retryAfter, request, options); + } + return this.processResponse(response); + } + _checkForDeprecationHeader(headers) { + const alertHeader = headers.get("Alert"); + if (!alertHeader) { + return; + } + let alert; + try { + alert = JSON.parse(alertHeader); + } catch (err) { + console.warn("Unable to parse Alert header message", alertHeader); + return; + } + console.warn(alert.message, alert.url); + if (this.events) { + this.events.emit("deprecated", alert); + } + } + _checkForBackoffHeader(headers) { + let backoffMs; + const backoffHeader = headers.get("Backoff"); + const backoffSeconds = backoffHeader ? parseInt(backoffHeader, 10) : 0; + if (backoffSeconds > 0) { + backoffMs = new Date().getTime() + backoffSeconds * 1000; + } else { + backoffMs = 0; + } + if (this.events) { + this.events.emit("backoff", backoffMs); + } + } + _checkForRetryAfterHeader(headers) { + const retryAfter = headers.get("Retry-After"); + if (!retryAfter) { + return null; + } + const delay = parseInt(retryAfter, 10) * 1000; + const tryAgainAfter = new Date().getTime() + delay; + if (this.events) { + this.events.emit("retry-after", tryAgainAfter); + } + return delay; + } +} + +/** + * Endpoints templates. 
+ * @type {Object} + */ +const ENDPOINTS = { + root: () => "/", + batch: () => "/batch", + permissions: () => "/permissions", + bucket: bucket => "/buckets" + (bucket ? `/${bucket}` : ""), + history: bucket => `${ENDPOINTS.bucket(bucket)}/history`, + collection: (bucket, coll) => + `${ENDPOINTS.bucket(bucket)}/collections` + (coll ? `/${coll}` : ""), + group: (bucket, group) => + `${ENDPOINTS.bucket(bucket)}/groups` + (group ? `/${group}` : ""), + record: (bucket, coll, id) => + `${ENDPOINTS.collection(bucket, coll)}/records` + (id ? `/${id}` : ""), + attachment: (bucket, coll, id) => + `${ENDPOINTS.record(bucket, coll, id)}/attachment`, +}; + +const requestDefaults = { + safe: false, + // check if we should set default content type here + headers: {}, + patch: false, +}; +/** + * @private + */ +function safeHeader(safe, last_modified) { + if (!safe) { + return {}; + } + if (last_modified) { + return { "If-Match": `"${last_modified}"` }; + } + return { "If-None-Match": "*" }; +} +/** + * @private + */ +function createRequest(path, { data, permissions }, options = {}) { + const { headers, safe } = { + ...requestDefaults, + ...options, + }; + const method = options.method || (data && data.id) ? "PUT" : "POST"; + return { + method, + path, + headers: { ...headers, ...safeHeader(safe) }, + body: { data, permissions }, + }; +} +/** + * @private + */ +function updateRequest(path, { data, permissions }, options = {}) { + const { headers, safe, patch } = { ...requestDefaults, ...options }; + const { last_modified } = { ...data, ...options }; + const hasNoData = + data && + Object.keys(data).filter(k => k !== "id" && k !== "last_modified") + .length === 0; + if (hasNoData) { + data = undefined; + } + return { + method: patch ? 
"PATCH" : "PUT", + path, + headers: { ...headers, ...safeHeader(safe, last_modified) }, + body: { data, permissions }, + }; +} +/** + * @private + */ +function jsonPatchPermissionsRequest(path, permissions, opType, options = {}) { + const { headers, safe, last_modified } = { ...requestDefaults, ...options }; + const ops = []; + for (const [type, principals] of Object.entries(permissions)) { + if (principals) { + for (const principal of principals) { + ops.push({ + op: opType, + path: `/permissions/${type}/${principal}`, + }); + } + } + } + return { + method: "PATCH", + path, + headers: { + ...headers, + ...safeHeader(safe, last_modified), + "Content-Type": "application/json-patch+json", + }, + body: ops, + }; +} +/** + * @private + */ +function deleteRequest(path, options = {}) { + const { headers, safe, last_modified } = { + ...requestDefaults, + ...options, + }; + if (safe && !last_modified) { + throw new Error("Safe concurrency check requires a last_modified value."); + } + return { + method: "DELETE", + path, + headers: { ...headers, ...safeHeader(safe, last_modified) }, + }; +} +/** + * @private + */ +function addAttachmentRequest( + path, + dataURI, + { data, permissions } = {}, + options = {} +) { + const { headers, safe, gzipped } = { ...requestDefaults, ...options }; + const { last_modified } = { ...data, ...options }; + const body = { data, permissions }; + const formData = createFormData(dataURI, body, options); + const customPath = `${path}${ + gzipped !== null ? "?gzipped=" + (gzipped ? "true" : "false") : "" + }`; + return { + method: "POST", + path: customPath, + headers: { ...headers, ...safeHeader(safe, last_modified) }, + body: formData, + }; +} + +/** + * Exports batch responses as a result object. + * + * @private + * @param {Array} responses The batch subrequest responses. + * @param {Array} requests The initial issued requests. 
+ * @return {Object} + */ +function aggregate(responses = [], requests = []) { + if (responses.length !== requests.length) { + throw new Error("Responses length should match requests one."); + } + const results = { + errors: [], + published: [], + conflicts: [], + skipped: [], + }; + return responses.reduce((acc, response, index) => { + const { status } = response; + const request = requests[index]; + if (status >= 200 && status < 400) { + acc.published.push(response.body); + } else if (status === 404) { + // Extract the id manually from request path while waiting for Kinto/kinto#818 + const regex = /(buckets|groups|collections|records)\/([^/]+)$/; + const extracts = request.path.match(regex); + const id = extracts && extracts.length === 3 ? extracts[2] : undefined; + acc.skipped.push({ + id, + path: request.path, + error: response.body, + }); + } else if (status === 412) { + acc.conflicts.push({ + // XXX: specifying the type is probably superfluous + type: "outgoing", + local: request.body, + remote: + (response.body.details && response.body.details.existing) || null, + }); + } else { + acc.errors.push({ + path: request.path, + sent: request, + error: response.body, + }); + } + return acc; + }, results); +} + +// Unique ID creation requires a high quality random # generator. In the browser we therefore +// require the crypto API and do not support built-in fallback to lower quality random number +// generators (like Math.random()). +let getRandomValues; +const rnds8 = new Uint8Array(16); +function rng() { + // lazy load so that environments that need to polyfill have a chance to do so + if (!getRandomValues) { + // getRandomValues needs to be invoked in a context where "this" is a Crypto implementation. + getRandomValues = + typeof crypto !== "undefined" && + crypto.getRandomValues && + crypto.getRandomValues.bind(crypto); + + if (!getRandomValues) { + throw new Error( + "crypto.getRandomValues() not supported. 
See https://github.com/uuidjs/uuid#getrandomvalues-not-supported" + ); + } + } + + return getRandomValues(rnds8); +} + +/** + * Convert array of 16 byte values to UUID string format of the form: + * XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + */ + +const byteToHex = []; + +for (let i = 0; i < 256; ++i) { + byteToHex.push((i + 0x100).toString(16).slice(1)); +} + +function unsafeStringify(arr, offset = 0) { + // Note: Be careful editing this code! It's been tuned for performance + // and works in ways you may not expect. See https://github.com/uuidjs/uuid/pull/434 + return ( + byteToHex[arr[offset + 0]] + + byteToHex[arr[offset + 1]] + + byteToHex[arr[offset + 2]] + + byteToHex[arr[offset + 3]] + + "-" + + byteToHex[arr[offset + 4]] + + byteToHex[arr[offset + 5]] + + "-" + + byteToHex[arr[offset + 6]] + + byteToHex[arr[offset + 7]] + + "-" + + byteToHex[arr[offset + 8]] + + byteToHex[arr[offset + 9]] + + "-" + + byteToHex[arr[offset + 10]] + + byteToHex[arr[offset + 11]] + + byteToHex[arr[offset + 12]] + + byteToHex[arr[offset + 13]] + + byteToHex[arr[offset + 14]] + + byteToHex[arr[offset + 15]] + ).toLowerCase(); +} + +const randomUUID = + typeof crypto !== "undefined" && + crypto.randomUUID && + crypto.randomUUID.bind(crypto); +var native = { + randomUUID, +}; + +function v4(options, buf, offset) { + if (native.randomUUID && !buf && !options) { + return native.randomUUID(); + } + + options = options || {}; + const rnds = options.random || (options.rng || rng)(); // Per 4.4, set bits for version and `clock_seq_hi_and_reserved` + + rnds[6] = (rnds[6] & 0x0f) | 0x40; + rnds[8] = (rnds[8] & 0x3f) | 0x80; // Copy bytes to buffer, if provided + + if (buf) { + offset = offset || 0; + + for (let i = 0; i < 16; ++i) { + buf[offset + i] = rnds[i]; + } + + return buf; + } + + return unsafeStringify(rnds); +} + +/** + * Abstract representation of a selected collection. + * + */ +class Collection { + /** + * Constructor. + * + * @param {KintoClient} client The client instance. 
+ * @param {Bucket} bucket The bucket instance. + * @param {String} name The collection name. + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Boolean} [options.safe] The safe option. + * @param {Number} [options.retry] The retry option. + * @param {Boolean} [options.batch] (Private) Whether this + * Collection is operating as part of a batch. + */ + constructor(client, bucket, name, options = {}) { + /** + * @ignore + */ + this.client = client; + /** + * @ignore + */ + this.bucket = bucket; + /** + * The collection name. + * @type {String} + */ + this.name = name; + this._endpoints = client.endpoints; + /** + * @ignore + */ + this._retry = options.retry || 0; + this._safe = !!options.safe; + // FIXME: This is kind of ugly; shouldn't the bucket be responsible + // for doing the merge? + this._headers = { + ...this.bucket.headers, + ...options.headers, + }; + } + get execute() { + return this.client.execute.bind(this.client); + } + /** + * Get the value of "headers" for a given request, merging the + * per-request headers with our own "default" headers. + * + * @private + */ + _getHeaders(options) { + return { + ...this._headers, + ...options.headers, + }; + } + /** + * Get the value of "safe" for a given request, using the + * per-request option if present or falling back to our default + * otherwise. + * + * @private + * @param {Object} options The options for a request. + * @returns {Boolean} + */ + _getSafe(options) { + return { safe: this._safe, ...options }.safe; + } + /** + * As _getSafe, but for "retry". + * + * @private + */ + _getRetry(options) { + return { retry: this._retry, ...options }.retry; + } + /** + * Retrieves the total number of records in this collection. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. 
+ * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @return {Promise<Number, Error>} + */ + async getTotalRecords(options = {}) { + const path = this._endpoints.record(this.bucket.name, this.name); + const request = { + headers: this._getHeaders(options), + path, + method: "HEAD", + }; + const { headers } = await this.client.execute(request, { + raw: true, + retry: this._getRetry(options), + }); + return parseInt(headers.get("Total-Records"), 10); + } + /** + * Retrieves the ETag of the records list, for use with the `since` filtering option. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @return {Promise<String, Error>} + */ + async getRecordsTimestamp(options = {}) { + const path = this._endpoints.record(this.bucket.name, this.name); + const request = { + headers: this._getHeaders(options), + path, + method: "HEAD", + }; + const { headers } = await this.client.execute(request, { + raw: true, + retry: this._getRetry(options), + }); + return headers.get("ETag"); + } + /** + * Retrieves collection data. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Object} [options.query] Query parameters to pass in + * the request. This might be useful for features that aren't + * yet supported by this library. + * @param {Array} [options.fields] Limit response to + * just some fields. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. 
+ * @return {Promise<Object, Error>} + */ + async getData(options = {}) { + const path = this._endpoints.collection(this.bucket.name, this.name); + const request = { headers: this._getHeaders(options), path }; + const { data } = await this.client.execute(request, { + retry: this._getRetry(options), + query: options.query, + fields: options.fields, + }); + return data; + } + /** + * Set collection data. + * @param {Object} data The collection data object. + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Boolean} [options.safe] The safe option. + * @param {Boolean} [options.patch] The patch option. + * @param {Number} [options.last_modified] The last_modified option. + * @return {Promise<Object, Error>} + */ + async setData(data, options = {}) { + if (!isObject(data)) { + throw new Error("A collection object is required."); + } + const { patch, permissions } = options; + const { last_modified } = { ...data, ...options }; + const path = this._endpoints.collection(this.bucket.name, this.name); + const request = updateRequest( + path, + { data, permissions }, + { + last_modified, + patch, + headers: this._getHeaders(options), + safe: this._getSafe(options), + } + ); + return this.client.execute(request, { + retry: this._getRetry(options), + }); + } + /** + * Retrieves the list of permissions for this collection. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. 
+ * @return {Promise<Object, Error>} + */ + async getPermissions(options = {}) { + const path = this._endpoints.collection(this.bucket.name, this.name); + const request = { headers: this._getHeaders(options), path }; + const { permissions } = await this.client.execute(request, { + retry: this._getRetry(options), + }); + return permissions; + } + /** + * Replaces all existing collection permissions with the ones provided. + * + * @param {Object} permissions The permissions object. + * @param {Object} [options={}] The options object + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Boolean} [options.safe] The safe option. + * @param {Number} [options.last_modified] The last_modified option. + * @return {Promise<Object, Error>} + */ + async setPermissions(permissions, options = {}) { + if (!isObject(permissions)) { + throw new Error("A permissions object is required."); + } + const path = this._endpoints.collection(this.bucket.name, this.name); + const data = { last_modified: options.last_modified }; + const request = updateRequest( + path, + { data, permissions }, + { + headers: this._getHeaders(options), + safe: this._getSafe(options), + } + ); + return this.client.execute(request, { + retry: this._getRetry(options), + }); + } + /** + * Append principals to the collection permissions. + * + * @param {Object} permissions The permissions object. + * @param {Object} [options={}] The options object + * @param {Boolean} [options.safe] The safe option. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Object} [options.last_modified] The last_modified option. 
+ * @return {Promise<Object, Error>}
+ */
+ async addPermissions(permissions, options = {}) {
+ if (!isObject(permissions)) {
+ throw new Error("A permissions object is required.");
+ }
+ const path = this._endpoints.collection(this.bucket.name, this.name);
+ const { last_modified } = options;
+ const request = jsonPatchPermissionsRequest(path, permissions, "add", {
+ last_modified,
+ headers: this._getHeaders(options),
+ safe: this._getSafe(options),
+ });
+ return this.client.execute(request, {
+ retry: this._getRetry(options),
+ });
+ }
+ /**
+ * Remove principals from the collection permissions.
+ *
+ * @param {Object} permissions The permissions object.
+ * @param {Object} [options={}] The options object
+ * @param {Boolean} [options.safe] The safe option.
+ * @param {Object} [options.headers] The headers object option.
+ * @param {Number} [options.retry=0] Number of retries to make
+ * when faced with transient errors.
+ * @param {Number} [options.last_modified] The last_modified option.
+ * @return {Promise<Object, Error>}
+ */
+ async removePermissions(permissions, options = {}) {
+ if (!isObject(permissions)) {
+ throw new Error("A permissions object is required.");
+ }
+ const path = this._endpoints.collection(this.bucket.name, this.name);
+ const { last_modified } = options;
+ const request = jsonPatchPermissionsRequest(path, permissions, "remove", {
+ last_modified,
+ headers: this._getHeaders(options),
+ safe: this._getSafe(options),
+ });
+ return this.client.execute(request, {
+ retry: this._getRetry(options),
+ });
+ }
+ /**
+ * Creates a record in current collection.
+ *
+ * @param {Object} record The record to create.
+ * @param {Object} [options={}] The options object.
+ * @param {Object} [options.headers] The headers object option.
+ * @param {Number} [options.retry=0] Number of retries to make
+ * when faced with transient errors.
+ * @param {Boolean} [options.safe] The safe option. 
+ * @param {Object} [options.permissions] The permissions option.
+ * @return {Promise<Object, Error>}
+ */
+ async createRecord(record, options = {}) {
+ const { permissions } = options;
+ const path = this._endpoints.record(this.bucket.name, this.name, record.id);
+ const request = createRequest(
+ path,
+ { data: record, permissions },
+ {
+ headers: this._getHeaders(options),
+ safe: this._getSafe(options),
+ }
+ );
+ return this.client.execute(request, {
+ retry: this._getRetry(options),
+ });
+ }
+ /**
+ * Adds an attachment to a record, creating the record when it doesn't exist.
+ *
+ * @param {String} dataURI The data url.
+ * @param {Object} [record={}] The record data.
+ * @param {Object} [options={}] The options object.
+ * @param {Object} [options.headers] The headers object option.
+ * @param {Number} [options.retry=0] Number of retries to make
+ * when faced with transient errors.
+ * @param {Boolean} [options.safe] The safe option.
+ * @param {Number} [options.last_modified] The last_modified option.
+ * @param {Object} [options.permissions] The permissions option.
+ * @param {String} [options.filename] Force the attachment filename.
+ * @param {Boolean} [options.gzipped] Force the attachment to be gzipped or not. 
+ * @return {Promise<Object, Error>}
+ */
+ async addAttachment(dataURI, record = {}, options = {}) {
+ const { permissions } = options;
+ const id = record.id || v4();
+ const path = this._endpoints.attachment(this.bucket.name, this.name, id);
+ const { last_modified } = { ...record, ...options };
+ const addAttachmentRequest$1 = addAttachmentRequest(
+ path,
+ dataURI,
+ { data: record, permissions },
+ {
+ last_modified,
+ filename: options.filename,
+ gzipped: options.gzipped,
+ headers: this._getHeaders(options),
+ safe: this._getSafe(options),
+ }
+ );
+ await this.client.execute(addAttachmentRequest$1, {
+ stringify: false,
+ retry: this._getRetry(options),
+ });
+ return this.getRecord(id);
+ }
+ /**
+ * Removes an attachment from a given record.
+ *
+ * @param {String} recordId The record id.
+ * @param {Object} [options={}] The options object.
+ * @param {Object} [options.headers] The headers object option.
+ * @param {Number} [options.retry=0] Number of retries to make
+ * when faced with transient errors.
+ * @param {Boolean} [options.safe] The safe option.
+ * @param {Number} [options.last_modified] The last_modified option.
+ */
+ async removeAttachment(recordId, options = {}) {
+ const { last_modified } = options;
+ const path = this._endpoints.attachment(
+ this.bucket.name,
+ this.name,
+ recordId
+ );
+ const request = deleteRequest(path, {
+ last_modified,
+ headers: this._getHeaders(options),
+ safe: this._getSafe(options),
+ });
+ return this.client.execute(request, {
+ retry: this._getRetry(options),
+ });
+ }
+ /**
+ * Updates a record in current collection.
+ *
+ * @param {Object} record The record to update.
+ * @param {Object} [options={}] The options object.
+ * @param {Object} [options.headers] The headers object option.
+ * @param {Number} [options.retry=0] Number of retries to make
+ * when faced with transient errors.
+ * @param {Boolean} [options.safe] The safe option. 
+ * @param {Number} [options.last_modified] The last_modified option. + * @param {Object} [options.permissions] The permissions option. + * @return {Promise<Object, Error>} + */ + async updateRecord(record, options = {}) { + if (!isObject(record)) { + throw new Error("A record object is required."); + } + if (!record.id) { + throw new Error("A record id is required."); + } + const { permissions } = options; + const { last_modified } = { ...record, ...options }; + const path = this._endpoints.record(this.bucket.name, this.name, record.id); + const request = updateRequest( + path, + { data: record, permissions }, + { + headers: this._getHeaders(options), + safe: this._getSafe(options), + last_modified, + patch: !!options.patch, + } + ); + return this.client.execute(request, { + retry: this._getRetry(options), + }); + } + /** + * Deletes a record from the current collection. + * + * @param {Object|String} record The record to delete. + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Boolean} [options.safe] The safe option. + * @param {Number} [options.last_modified] The last_modified option. + * @return {Promise<Object, Error>} + */ + async deleteRecord(record, options = {}) { + const recordObj = toDataBody(record); + if (!recordObj.id) { + throw new Error("A record id is required."); + } + const { id } = recordObj; + const { last_modified } = { ...recordObj, ...options }; + const path = this._endpoints.record(this.bucket.name, this.name, id); + const request = deleteRequest(path, { + last_modified, + headers: this._getHeaders(options), + safe: this._getSafe(options), + }); + return this.client.execute(request, { + retry: this._getRetry(options), + }); + } + /** + * Deletes records from the current collection. 
+ * + * Sorting is done by passing a `sort` string option: + * + * - The field to order the results by, prefixed with `-` for descending. + * Default: `-last_modified`. + * + * @see http://kinto.readthedocs.io/en/stable/api/1.x/sorting.html + * + * Filtering is done by passing a `filters` option object: + * + * - `{fieldname: "value"}` + * - `{min_fieldname: 4000}` + * - `{in_fieldname: "1,2,3"}` + * - `{not_fieldname: 0}` + * - `{exclude_fieldname: "0,1"}` + * + * @see http://kinto.readthedocs.io/en/stable/api/1.x/filtering.html + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Object} [options.filters={}] The filters object. + * @param {String} [options.sort="-last_modified"] The sort field. + * @param {String} [options.at] The timestamp to get a snapshot at. + * @param {String} [options.limit=null] The limit field. + * @param {String} [options.pages=1] The number of result pages to aggregate. + * @param {Number} [options.since=null] Only retrieve records modified since the provided timestamp. + * @param {Array} [options.fields] Limit response to just some fields. + * @return {Promise<Object, Error>} + */ + async deleteRecords(options = {}) { + const path = this._endpoints.record(this.bucket.name, this.name); + return this.client.paginatedDelete(path, options, { + headers: this._getHeaders(options), + retry: this._getRetry(options), + }); + } + /** + * Retrieves a record from the current collection. + * + * @param {String} id The record id to retrieve. + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Object} [options.query] Query parameters to pass in + * the request. This might be useful for features that aren't + * yet supported by this library. 
+ * @param {Array} [options.fields] Limit response to + * just some fields. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @return {Promise<Object, Error>} + */ + async getRecord(id, options = {}) { + const path = this._endpoints.record(this.bucket.name, this.name, id); + const request = { headers: this._getHeaders(options), path }; + return this.client.execute(request, { + retry: this._getRetry(options), + query: options.query, + fields: options.fields, + }); + } + /** + * Lists records from the current collection. + * + * Sorting is done by passing a `sort` string option: + * + * - The field to order the results by, prefixed with `-` for descending. + * Default: `-last_modified`. + * + * @see http://kinto.readthedocs.io/en/stable/api/1.x/sorting.html + * + * Filtering is done by passing a `filters` option object: + * + * - `{fieldname: "value"}` + * - `{min_fieldname: 4000}` + * - `{in_fieldname: "1,2,3"}` + * - `{not_fieldname: 0}` + * - `{exclude_fieldname: "0,1"}` + * + * @see http://kinto.readthedocs.io/en/stable/api/1.x/filtering.html + * + * Paginating is done by passing a `limit` option, then calling the `next()` + * method from the resolved result object to fetch the next page, if any. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Object} [options.filters={}] The filters object. + * @param {String} [options.sort="-last_modified"] The sort field. + * @param {String} [options.at] The timestamp to get a snapshot at. + * @param {String} [options.limit=null] The limit field. + * @param {String} [options.pages=1] The number of result pages to aggregate. + * @param {Number} [options.since=null] Only retrieve records modified since the provided timestamp. 
+ * @param {Array} [options.fields] Limit response to just some fields. + * @return {Promise<Object, Error>} + */ + async listRecords(options = {}) { + const path = this._endpoints.record(this.bucket.name, this.name); + if (options.at) { + return this.getSnapshot(options.at); + } + return this.client.paginatedList(path, options, { + headers: this._getHeaders(options), + retry: this._getRetry(options), + }); + } + /** + * @private + */ + async isHistoryComplete() { + // We consider that if we have the collection creation event part of the + // history, then all records change events have been tracked. + const { + data: [oldestHistoryEntry], + } = await this.bucket.listHistory({ + limit: 1, + filters: { + action: "create", + resource_name: "collection", + collection_id: this.name, + }, + }); + return !!oldestHistoryEntry; + } + /** + * @private + */ + async getSnapshot(at) { + if (!at || !Number.isInteger(at) || at <= 0) { + throw new Error("Invalid argument, expected a positive integer."); + } + // Retrieve history and check it covers the required time range. + // Ensure we have enough history data to retrieve the complete list of + // changes. + if (!(await this.isHistoryComplete())) { + throw new Error( + "Computing a snapshot is only possible when the full history for a " + + "collection is available. Here, the history plugin seems to have " + + "been enabled after the creation of the collection." + ); + } + // Because of https://github.com/Kinto/kinto-http.js/issues/963 + // we cannot simply rely on the history endpoint. + // Our strategy here is to clean-up the history entries from the + // records that were deleted via the plural endpoint. + // We will detect them by comparing the current state of the collection + // and the full history of the collection since its genesis. + // List full history of collection. 
+ const { data: fullHistory } = await this.bucket.listHistory({ + pages: Infinity, + sort: "last_modified", + filters: { + resource_name: "record", + collection_id: this.name, + }, + }); + // Keep latest entry ever, and latest within snapshot window. + // (history is sorted chronologically) + const latestEver = new Map(); + const latestInSnapshot = new Map(); + for (const entry of fullHistory) { + if (entry.target.data.last_modified <= at) { + // Snapshot includes changes right on timestamp. + latestInSnapshot.set(entry.record_id, entry); + } + latestEver.set(entry.record_id, entry); + } + // Current records ids in the collection. + const { data: current } = await this.listRecords({ + pages: Infinity, + fields: ["id"], // we don't need attributes. + }); + const currentIds = new Set(current.map(record => record.id)); + // If a record is not in the current collection, and its + // latest history entry isn't a delete then this means that + // it was deleted via the plural endpoint (and that we lost track + // of this deletion because of bug #963) + const deletedViaPlural = new Set(); + for (const entry of latestEver.values()) { + if (entry.action != "delete" && !currentIds.has(entry.record_id)) { + deletedViaPlural.add(entry.record_id); + } + } + // Now reconstruct the collection based on latest version in snapshot + // filtering all deleted records. + const reconstructed = []; + for (const entry of latestInSnapshot.values()) { + if (entry.action != "delete" && !deletedViaPlural.has(entry.record_id)) { + reconstructed.push(entry.target.data); + } + } + return { + last_modified: String(at), + data: Array.from(reconstructed).sort( + (a, b) => b.last_modified - a.last_modified + ), + next: () => { + throw new Error("Snapshots don't support pagination"); + }, + hasNextPage: false, + totalRecords: reconstructed.length, + }; + } + /** + * Performs batch operations at the current collection level. + * + * @param {Function} fn The batch operation function. 
+ * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Boolean} [options.safe] The safe option. + * @param {Number} [options.retry] The retry option. + * @param {Boolean} [options.aggregate] Produces a grouped result object. + * @return {Promise<Object, Error>} + */ + async batch(fn, options = {}) { + return this.client.batch(fn, { + bucket: this.bucket.name, + collection: this.name, + headers: this._getHeaders(options), + retry: this._getRetry(options), + safe: this._getSafe(options), + aggregate: !!options.aggregate, + }); + } +} +__decorate( + [capable(["attachments"])], + Collection.prototype, + "addAttachment", + null +); +__decorate( + [capable(["attachments"])], + Collection.prototype, + "removeAttachment", + null +); +__decorate([capable(["history"])], Collection.prototype, "getSnapshot", null); + +/** + * Abstract representation of a selected bucket. + * + */ +class Bucket { + /** + * Constructor. + * + * @param {KintoClient} client The client instance. + * @param {String} name The bucket name. + * @param {Object} [options={}] The headers object option. + * @param {Object} [options.headers] The headers object option. + * @param {Boolean} [options.safe] The safe option. + * @param {Number} [options.retry] The retry option. + */ + constructor(client, name, options = {}) { + /** + * @ignore + */ + this.client = client; + /** + * The bucket name. + * @type {String} + */ + this.name = name; + this._endpoints = client.endpoints; + /** + * @ignore + */ + this._headers = options.headers || {}; + this._retry = options.retry || 0; + this._safe = !!options.safe; + } + get execute() { + return this.client.execute.bind(this.client); + } + get headers() { + return this._headers; + } + /** + * Get the value of "headers" for a given request, merging the + * per-request headers with our own "default" headers. 
+ * + * @private + */ + _getHeaders(options) { + return { + ...this._headers, + ...options.headers, + }; + } + /** + * Get the value of "safe" for a given request, using the + * per-request option if present or falling back to our default + * otherwise. + * + * @private + * @param {Object} options The options for a request. + * @returns {Boolean} + */ + _getSafe(options) { + return { safe: this._safe, ...options }.safe; + } + /** + * As _getSafe, but for "retry". + * + * @private + */ + _getRetry(options) { + return { retry: this._retry, ...options }.retry; + } + /** + * Selects a collection. + * + * @param {String} name The collection name. + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Boolean} [options.safe] The safe option. + * @return {Collection} + */ + collection(name, options = {}) { + return new Collection(this.client, this, name, { + headers: this._getHeaders(options), + retry: this._getRetry(options), + safe: this._getSafe(options), + }); + } + /** + * Retrieves the ETag of the collection list, for use with the `since` filtering option. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @return {Promise<String, Error>} + */ + async getCollectionsTimestamp(options = {}) { + const path = this._endpoints.collection(this.name); + const request = { + headers: this._getHeaders(options), + path, + method: "HEAD", + }; + const { headers } = await this.client.execute(request, { + raw: true, + retry: this._getRetry(options), + }); + return headers.get("ETag"); + } + /** + * Retrieves the ETag of the group list, for use with the `since` filtering option. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. 
+ * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @return {Promise<String, Error>} + */ + async getGroupsTimestamp(options = {}) { + const path = this._endpoints.group(this.name); + const request = { + headers: this._getHeaders(options), + path, + method: "HEAD", + }; + const { headers } = await this.client.execute(request, { + raw: true, + retry: this._getRetry(options), + }); + return headers.get("ETag"); + } + /** + * Retrieves bucket data. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Object} [options.query] Query parameters to pass in + * the request. This might be useful for features that aren't + * yet supported by this library. + * @param {Array} [options.fields] Limit response to + * just some fields. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @return {Promise<Object, Error>} + */ + async getData(options = {}) { + const path = this._endpoints.bucket(this.name); + const request = { + headers: this._getHeaders(options), + path, + }; + const { data } = await this.client.execute(request, { + retry: this._getRetry(options), + query: options.query, + fields: options.fields, + }); + return data; + } + /** + * Set bucket data. + * @param {Object} data The bucket data object. + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers={}] The headers object option. + * @param {Boolean} [options.safe] The safe option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Boolean} [options.patch] The patch option. + * @param {Number} [options.last_modified] The last_modified option. 
+ * @return {Promise<Object, Error>} + */ + async setData(data, options = {}) { + if (!isObject(data)) { + throw new Error("A bucket object is required."); + } + const bucket = { + ...data, + id: this.name, + }; + // For default bucket, we need to drop the id from the data object. + // Bug in Kinto < 3.1.1 + const bucketId = bucket.id; + if (bucket.id === "default") { + delete bucket.id; + } + const path = this._endpoints.bucket(bucketId); + const { patch, permissions } = options; + const { last_modified } = { ...data, ...options }; + const request = updateRequest( + path, + { data: bucket, permissions }, + { + last_modified, + patch, + headers: this._getHeaders(options), + safe: this._getSafe(options), + } + ); + return this.client.execute(request, { + retry: this._getRetry(options), + }); + } + /** + * Retrieves the list of history entries in the current bucket. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @return {Promise<Array<Object>, Error>} + */ + async listHistory(options = {}) { + const path = this._endpoints.history(this.name); + return this.client.paginatedList(path, options, { + headers: this._getHeaders(options), + retry: this._getRetry(options), + }); + } + /** + * Retrieves the list of collections in the current bucket. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.filters={}] The filters object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Array} [options.fields] Limit response to + * just some fields. 
+ * @return {Promise<Array<Object>, Error>} + */ + async listCollections(options = {}) { + const path = this._endpoints.collection(this.name); + return this.client.paginatedList(path, options, { + headers: this._getHeaders(options), + retry: this._getRetry(options), + }); + } + /** + * Creates a new collection in current bucket. + * + * @param {String|undefined} id The collection id. + * @param {Object} [options={}] The options object. + * @param {Boolean} [options.safe] The safe option. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Object} [options.permissions] The permissions object. + * @param {Object} [options.data] The data object. + * @return {Promise<Object, Error>} + */ + async createCollection(id, options = {}) { + const { permissions, data = {} } = options; + data.id = id; + const path = this._endpoints.collection(this.name, id); + const request = createRequest( + path, + { data, permissions }, + { + headers: this._getHeaders(options), + safe: this._getSafe(options), + } + ); + return this.client.execute(request, { + retry: this._getRetry(options), + }); + } + /** + * Deletes a collection from the current bucket. + * + * @param {Object|String} collection The collection to delete. + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Boolean} [options.safe] The safe option. + * @param {Number} [options.last_modified] The last_modified option. 
+ * @return {Promise<Object, Error>} + */ + async deleteCollection(collection, options = {}) { + const collectionObj = toDataBody(collection); + if (!collectionObj.id) { + throw new Error("A collection id is required."); + } + const { id } = collectionObj; + const { last_modified } = { ...collectionObj, ...options }; + const path = this._endpoints.collection(this.name, id); + const request = deleteRequest(path, { + last_modified, + headers: this._getHeaders(options), + safe: this._getSafe(options), + }); + return this.client.execute(request, { + retry: this._getRetry(options), + }); + } + /** + * Deletes collections from the current bucket. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.filters={}] The filters object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Array} [options.fields] Limit response to + * just some fields. + * @return {Promise<Array<Object>, Error>} + */ + async deleteCollections(options = {}) { + const path = this._endpoints.collection(this.name); + return this.client.paginatedDelete(path, options, { + headers: this._getHeaders(options), + retry: this._getRetry(options), + }); + } + /** + * Retrieves the list of groups in the current bucket. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.filters={}] The filters object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Array} [options.fields] Limit response to + * just some fields. 
+ * @return {Promise<Array<Object>, Error>} + */ + async listGroups(options = {}) { + const path = this._endpoints.group(this.name); + return this.client.paginatedList(path, options, { + headers: this._getHeaders(options), + retry: this._getRetry(options), + }); + } + /** + * Fetches a group in current bucket. + * + * @param {String} id The group id. + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Object} [options.query] Query parameters to pass in + * the request. This might be useful for features that aren't + * yet supported by this library. + * @param {Array} [options.fields] Limit response to + * just some fields. + * @return {Promise<Object, Error>} + */ + async getGroup(id, options = {}) { + const path = this._endpoints.group(this.name, id); + const request = { + headers: this._getHeaders(options), + path, + }; + return this.client.execute(request, { + retry: this._getRetry(options), + query: options.query, + fields: options.fields, + }); + } + /** + * Creates a new group in current bucket. + * + * @param {String|undefined} id The group id. + * @param {Array<String>} [members=[]] The list of principals. + * @param {Object} [options={}] The options object. + * @param {Object} [options.data] The data object. + * @param {Object} [options.permissions] The permissions object. + * @param {Boolean} [options.safe] The safe option. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. 
+ * @return {Promise<Object, Error>} + */ + async createGroup(id, members = [], options = {}) { + const data = { + ...options.data, + id, + members, + }; + const path = this._endpoints.group(this.name, id); + const { permissions } = options; + const request = createRequest( + path, + { data, permissions }, + { + headers: this._getHeaders(options), + safe: this._getSafe(options), + } + ); + return this.client.execute(request, { + retry: this._getRetry(options), + }); + } + /** + * Updates an existing group in current bucket. + * + * @param {Object} group The group object. + * @param {Object} [options={}] The options object. + * @param {Object} [options.data] The data object. + * @param {Object} [options.permissions] The permissions object. + * @param {Boolean} [options.safe] The safe option. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Number} [options.last_modified] The last_modified option. + * @return {Promise<Object, Error>} + */ + async updateGroup(group, options = {}) { + if (!isObject(group)) { + throw new Error("A group object is required."); + } + if (!group.id) { + throw new Error("A group id is required."); + } + const data = { + ...options.data, + ...group, + }; + const path = this._endpoints.group(this.name, group.id); + const { patch, permissions } = options; + const { last_modified } = { ...data, ...options }; + const request = updateRequest( + path, + { data, permissions }, + { + last_modified, + patch, + headers: this._getHeaders(options), + safe: this._getSafe(options), + } + ); + return this.client.execute(request, { + retry: this._getRetry(options), + }); + } + /** + * Deletes a group from the current bucket. + * + * @param {Object|String} group The group to delete. + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. 
+ * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Boolean} [options.safe] The safe option. + * @param {Number} [options.last_modified] The last_modified option. + * @return {Promise<Object, Error>} + */ + async deleteGroup(group, options = {}) { + const groupObj = toDataBody(group); + const { id } = groupObj; + const { last_modified } = { ...groupObj, ...options }; + const path = this._endpoints.group(this.name, id); + const request = deleteRequest(path, { + last_modified, + headers: this._getHeaders(options), + safe: this._getSafe(options), + }); + return this.client.execute(request, { + retry: this._getRetry(options), + }); + } + /** + * Deletes groups from the current bucket. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.filters={}] The filters object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Array} [options.fields] Limit response to + * just some fields. + * @return {Promise<Array<Object>, Error>} + */ + async deleteGroups(options = {}) { + const path = this._endpoints.group(this.name); + return this.client.paginatedDelete(path, options, { + headers: this._getHeaders(options), + retry: this._getRetry(options), + }); + } + /** + * Retrieves the list of permissions for this bucket. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. 
+ * @return {Promise<Object, Error>}
+ */
+  async getPermissions(options = {}) {
+    // GET the bucket resource and surface only its `permissions` member.
+    const request = {
+      headers: this._getHeaders(options),
+      path: this._endpoints.bucket(this.name),
+    };
+    const { permissions } = await this.client.execute(request, {
+      retry: this._getRetry(options),
+    });
+    return permissions;
+  }
+  /**
+   * Replaces all existing bucket permissions with the ones provided.
+   *
+   * @param {Object} permissions The permissions object.
+   * @param {Object} [options={}] The options object
+   * @param {Boolean} [options.safe] The safe option.
+   * @param {Object} [options.headers={}] The headers object option.
+   * @param {Number} [options.retry=0] Number of retries to make
+   *     when faced with transient errors.
+   * @param {Object} [options.last_modified] The last_modified option.
+   * @return {Promise<Object, Error>}
+   */
+  async setPermissions(permissions, options = {}) {
+    if (!isObject(permissions)) {
+      throw new Error("A permissions object is required.");
+    }
+    const path = this._endpoints.bucket(this.name);
+    const { last_modified } = options;
+    // The data payload carries only `last_modified` (if any) alongside the
+    // replacement permissions; no bucket attributes are sent.
+    const data = { last_modified };
+    const request = updateRequest(
+      path,
+      { data, permissions },
+      {
+        headers: this._getHeaders(options),
+        safe: this._getSafe(options),
+      }
+    );
+    return this.client.execute(request, {
+      retry: this._getRetry(options),
+    });
+  }
+  /**
+   * Append principals to the bucket permissions.
+   *
+   * @param {Object} permissions The permissions object.
+   * @param {Object} [options={}] The options object
+   * @param {Boolean} [options.safe] The safe option.
+   * @param {Object} [options.headers] The headers object option.
+   * @param {Number} [options.retry=0] Number of retries to make
+   *     when faced with transient errors.
+   * @param {Object} [options.last_modified] The last_modified option.
+ * @return {Promise<Object, Error>}
+ */
+  async addPermissions(permissions, options = {}) {
+    if (!isObject(permissions)) {
+      throw new Error("A permissions object is required.");
+    }
+    const path = this._endpoints.bucket(this.name);
+    const { last_modified } = options;
+    // Delegates to the shared JSON-PATCH request builder with the "add"
+    // operation (contrast with setPermissions, which replaces everything).
+    const request = jsonPatchPermissionsRequest(path, permissions, "add", {
+      last_modified,
+      headers: this._getHeaders(options),
+      safe: this._getSafe(options),
+    });
+    return this.client.execute(request, {
+      retry: this._getRetry(options),
+    });
+  }
+  /**
+   * Remove principals from the bucket permissions.
+   *
+   * @param {Object} permissions The permissions object.
+   * @param {Object} [options={}] The options object
+   * @param {Boolean} [options.safe] The safe option.
+   * @param {Object} [options.headers] The headers object option.
+   * @param {Number} [options.retry=0] Number of retries to make
+   *     when faced with transient errors.
+   * @param {Object} [options.last_modified] The last_modified option.
+   * @return {Promise<Object, Error>}
+   */
+  async removePermissions(permissions, options = {}) {
+    if (!isObject(permissions)) {
+      throw new Error("A permissions object is required.");
+    }
+    const path = this._endpoints.bucket(this.name);
+    const { last_modified } = options;
+    // Mirror of addPermissions, using the "remove" patch operation.
+    const request = jsonPatchPermissionsRequest(path, permissions, "remove", {
+      last_modified,
+      headers: this._getHeaders(options),
+      safe: this._getSafe(options),
+    });
+    return this.client.execute(request, {
+      retry: this._getRetry(options),
+    });
+  }
+  /**
+   * Performs batch operations at the current bucket level.
+   *
+   * @param {Function} fn The batch operation function.
+   * @param {Object} [options={}] The options object.
+   * @param {Object} [options.headers] The headers object option.
+   * @param {Boolean} [options.safe] The safe option.
+   * @param {Number} [options.retry=0] The retry option.
+   * @param {Boolean} [options.aggregate] Produces a grouped result object.
+ * @return {Promise<Object, Error>} + */ + async batch(fn, options = {}) { + return this.client.batch(fn, { + bucket: this.name, + headers: this._getHeaders(options), + retry: this._getRetry(options), + safe: this._getSafe(options), + aggregate: !!options.aggregate, + }); + } +} +__decorate([capable(["history"])], Bucket.prototype, "listHistory", null); + +/** + * Currently supported protocol version. + * @type {String} + */ +const SUPPORTED_PROTOCOL_VERSION = "v1"; +/** + * High level HTTP client for the Kinto API. + * + * @example + * const client = new KintoClient("https://demo.kinto-storage.org/v1"); + * client.bucket("default") + * .collection("my-blog") + * .createRecord({title: "First article"}) + * .then(console.log.bind(console)) + * .catch(console.error.bind(console)); + */ +class KintoClientBase { + /** + * Constructor. + * + * @param {String} remote The remote URL. + * @param {Object} [options={}] The options object. + * @param {Boolean} [options.safe=true] Adds concurrency headers to every requests. + * @param {EventEmitter} [options.events=EventEmitter] The events handler instance. + * @param {Object} [options.headers={}] The key-value headers to pass to each request. + * @param {Object} [options.retry=0] Number of retries when request fails (default: 0) + * @param {String} [options.bucket="default"] The default bucket to use. + * @param {String} [options.requestMode="cors"] The HTTP request mode (from ES6 fetch spec). + * @param {Number} [options.timeout=null] The request timeout in ms, if any. + * @param {Function} [options.fetchFunc=fetch] The function to be used to execute HTTP requests. 
+ */
+  constructor(remote, options) {
+    // NOTE(review): `options` has no default value, so callers must always
+    // pass at least an empty object (KintoHttpClient does) — TODO confirm
+    // all call sites do.
+    if (typeof remote !== "string" || !remote.length) {
+      throw new Error("Invalid remote URL: " + remote);
+    }
+    // Normalize away a single trailing slash so endpoint paths concatenate
+    // cleanly.
+    if (remote[remote.length - 1] === "/") {
+      remote = remote.slice(0, -1);
+    }
+    this._backoffReleaseTime = null;
+    this._requests = [];
+    // A "batch" client only records requests (see execute()) instead of
+    // sending them.
+    this._isBatch = !!options.batch;
+    this._retry = options.retry || 0;
+    this._safe = !!options.safe;
+    this._headers = options.headers || {};
+    // public properties
+    /**
+     * The remote server base URL.
+     * @type {String}
+     */
+    // Assigned through the `remote` setter below, which validates the /vN
+    // version suffix.
+    this.remote = remote;
+    /**
+     * Current server information.
+     * @ignore
+     * @type {Object|null}
+     */
+    this.serverInfo = null;
+    /**
+     * The event emitter instance. Should comply with the `EventEmitter`
+     * interface.
+     * @ignore
+     * @type {Class}
+     */
+    this.events = options.events;
+    this.endpoints = ENDPOINTS;
+    const { fetchFunc, requestMode, timeout } = options;
+    /**
+     * The HTTP instance.
+     * @ignore
+     * @type {HTTP}
+     */
+    this.http = new HTTP(this.events, { fetchFunc, requestMode, timeout });
+    this._registerHTTPEvents();
+  }
+  /**
+   * The remote endpoint base URL. Setting the value will also extract and
+   * validate the version.
+   * @type {String}
+   */
+  get remote() {
+    return this._remote;
+  }
+  /**
+   * @ignore
+   */
+  set remote(url) {
+    let version;
+    // The version is the trailing `/vN` path segment. When it is absent,
+    // match() returns null and the `[1]` access throws, landing in the catch.
+    try {
+      version = url.match(/\/(v\d+)\/?$/)[1];
+    } catch (err) {
+      throw new Error("The remote URL must contain the version: " + url);
+    }
+    if (version !== SUPPORTED_PROTOCOL_VERSION) {
+      throw new Error(`Unsupported protocol version: ${version}`);
+    }
+    this._remote = url;
+    this._version = version;
+  }
+  /**
+   * The current server protocol version, eg. `v1`.
+   * @type {String}
+   */
+  get version() {
+    return this._version;
+  }
+  /**
+   * Backoff remaining time, in milliseconds. Defaults to zero if no backoff is
+   * ongoing.
+ *
+ * @type {Number}
+ */
+  get backoff() {
+    // `_backoffReleaseTime` is compared against the current epoch-ms clock,
+    // i.e. the "backoff" event payload is treated as an absolute release
+    // timestamp, not a duration. Report the remaining delay, or 0.
+    const currentTime = new Date().getTime();
+    if (this._backoffReleaseTime && currentTime < this._backoffReleaseTime) {
+      return this._backoffReleaseTime - currentTime;
+    }
+    return 0;
+  }
+  /**
+   * Registers HTTP events.
+   * @private
+   */
+  _registerHTTPEvents() {
+    // Prevent registering event from a batch client instance
+    if (!this._isBatch && this.events) {
+      this.events.on("backoff", backoffMs => {
+        this._backoffReleaseTime = backoffMs;
+      });
+    }
+  }
+  /**
+   * Retrieve a bucket object to perform operations on it.
+   *
+   * @param {String} name The bucket name.
+   * @param {Object} [options={}] The request options.
+   * @param {Boolean} [options.safe] The resulting safe option.
+   * @param {Number} [options.retry] The resulting retry option.
+   * @param {Object} [options.headers] The extended headers object option.
+   * @return {Bucket}
+   */
+  bucket(name, options = {}) {
+    // The child Bucket snapshots the merged headers/safe/retry defaults at
+    // creation time.
+    return new Bucket(this, name, {
+      headers: this._getHeaders(options),
+      safe: this._getSafe(options),
+      retry: this._getRetry(options),
+    });
+  }
+  /**
+   * Set client "headers" for every request, updating previous headers (if any).
+   *
+   * @param {Object} headers The headers to merge with existing ones.
+   */
+  setHeaders(headers) {
+    this._headers = {
+      ...this._headers,
+      ...headers,
+    };
+    // Changed headers can change "who we are" server-side: drop the cached
+    // hello response so the next fetchServerInfo() re-fetches it.
+    this.serverInfo = null;
+  }
+  /**
+   * Get the value of "headers" for a given request, merging the
+   * per-request headers with our own "default" headers.
+   *
+   * Note that unlike other options, headers aren't overridden, but
+   * merged instead.
+   *
+   * @private
+   * @param {Object} options The options for a request.
+   * @returns {Object}
+   */
+  _getHeaders(options) {
+    return {
+      ...this._headers,
+      ...options.headers,
+    };
+  }
+  /**
+   * Get the value of "safe" for a given request, using the
+   * per-request option if present or falling back to our default
+   * otherwise.
+   *
+   * @private
+   * @param {Object} options The options for a request.
+ * @returns {Boolean}
+ */
+  _getSafe(options) {
+    // Spread trick: `options.safe`, when present, shadows the client default.
+    return { safe: this._safe, ...options }.safe;
+  }
+  /**
+   * As _getSafe, but for "retry".
+   *
+   * @private
+   */
+  _getRetry(options) {
+    return { retry: this._retry, ...options }.retry;
+  }
+  /**
+   * Retrieves the server's "hello" endpoint. This endpoint reveals
+   * server capabilities and settings as well as telling the client
+   * "who they are" according to their given authorization headers.
+   *
+   * @private
+   * @param {Object} [options={}] The request options.
+   * @param {Object} [options.headers={}] Headers to use when making
+   *     this request.
+   * @param {Number} [options.retry=0] Number of retries to make
+   *     when faced with transient errors.
+   * @return {Promise<Object, Error>}
+   */
+  async _getHello(options = {}) {
+    const path = this.remote + ENDPOINTS.root();
+    const { json } = await this.http.request(
+      path,
+      { headers: this._getHeaders(options) },
+      { retry: this._getRetry(options) }
+    );
+    return json;
+  }
+  /**
+   * Retrieves server information and persist them locally. This operation is
+   * usually performed a single time during the instance lifecycle.
+   *
+   * @param {Object} [options={}] The request options.
+   * @param {Number} [options.retry=0] Number of retries to make
+   *     when faced with transient errors.
+   * @return {Promise<Object, Error>}
+   */
+  async fetchServerInfo(options = {}) {
+    // Cached after the first successful call; invalidated by setHeaders().
+    if (this.serverInfo) {
+      return this.serverInfo;
+    }
+    // NOTE(review): only `retry` is forwarded here — any per-call headers in
+    // `options` are dropped; confirm that is intentional for callers that
+    // pass headers.
+    this.serverInfo = await this._getHello({ retry: this._getRetry(options) });
+    return this.serverInfo;
+  }
+  /**
+   * Retrieves Kinto server settings.
+   *
+   * @param {Object} [options={}] The request options.
+   * @param {Number} [options.retry=0] Number of retries to make
+   *     when faced with transient errors.
+   * @return {Promise<Object, Error>}
+   */
+  async fetchServerSettings(options = {}) {
+    const { settings } = await this.fetchServerInfo(options);
+    return settings;
+  }
+  /**
+   * Retrieve server capabilities information.
+ *
+ * @param {Object} [options={}] The request options.
+ * @param {Number} [options.retry=0] Number of retries to make
+ *     when faced with transient errors.
+ * @return {Promise<Object, Error>}
+ */
+  async fetchServerCapabilities(options = {}) {
+    const { capabilities } = await this.fetchServerInfo(options);
+    return capabilities;
+  }
+  /**
+   * Retrieve authenticated user information.
+   *
+   * @param {Object} [options={}] The request options.
+   * @param {Object} [options.headers={}] Headers to use when making
+   *     this request.
+   * @param {Number} [options.retry=0] Number of retries to make
+   *     when faced with transient errors.
+   * @return {Promise<Object, Error>}
+   */
+  async fetchUser(options = {}) {
+    // Uses _getHello directly (bypassing the fetchServerInfo cache) so the
+    // per-call headers in `options` are included in the request.
+    const { user } = await this._getHello(options);
+    return user;
+  }
+  /**
+   * Retrieve the HTTP API version reported by the server's hello endpoint
+   * (`http_api_version`).
+   *
+   * @param {Object} [options={}] The request options.
+   * @param {Number} [options.retry=0] Number of retries to make
+   *     when faced with transient errors.
+   * @return {Promise<Object, Error>}
+   */
+  async fetchHTTPApiVersion(options = {}) {
+    const { http_api_version } = await this.fetchServerInfo(options);
+    return http_api_version;
+  }
+  /**
+   * Process batch requests, chunking them according to the batch_max_requests
+   * server setting when needed.
+   *
+   * @param {Array} requests The list of batch subrequests to perform.
+   * @param {Object} [options={}] The options object.
+ * @return {Promise<Object, Error>} + */ + async _batchRequests(requests, options = {}) { + const headers = this._getHeaders(options); + if (!requests.length) { + return []; + } + const serverSettings = await this.fetchServerSettings({ + retry: this._getRetry(options), + }); + const maxRequests = serverSettings.batch_max_requests; + if (maxRequests && requests.length > maxRequests) { + const chunks = partition(requests, maxRequests); + const results = []; + for (const chunk of chunks) { + const result = await this._batchRequests(chunk, options); + results.push(...result); + } + return results; + } + const { responses } = await this.execute( + { + // FIXME: is this really necessary, since it's also present in + // the "defaults"? + headers, + path: ENDPOINTS.batch(), + method: "POST", + body: { + defaults: { headers }, + requests, + }, + }, + { retry: this._getRetry(options) } + ); + return responses; + } + /** + * Sends batch requests to the remote server. + * + * Note: Reserved for internal use only. + * + * @ignore + * @param {Function} fn The function to use for describing batch ops. + * @param {Object} [options={}] The options object. + * @param {Boolean} [options.safe] The safe option. + * @param {Number} [options.retry] The retry option. + * @param {String} [options.bucket] The bucket name option. + * @param {String} [options.collection] The collection name option. + * @param {Object} [options.headers] The headers object option. + * @param {Boolean} [options.aggregate=false] Produces an aggregated result object. 
+ * @return {Promise<Object, Error>} + */ + async batch(fn, options = {}) { + const rootBatch = new KintoClientBase(this.remote, { + events: this.events, + batch: true, + safe: this._getSafe(options), + retry: this._getRetry(options), + }); + if (options.bucket && options.collection) { + fn(rootBatch.bucket(options.bucket).collection(options.collection)); + } else if (options.bucket) { + fn(rootBatch.bucket(options.bucket)); + } else { + fn(rootBatch); + } + const responses = await this._batchRequests(rootBatch._requests, options); + if (options.aggregate) { + return aggregate(responses, rootBatch._requests); + } + return responses; + } + async execute(request, options = {}) { + const { raw = false, stringify = true } = options; + // If we're within a batch, add the request to the stack to send at once. + if (this._isBatch) { + this._requests.push(request); + // Resolve with a message in case people attempt at consuming the result + // from within a batch operation. + const msg = + "This result is generated from within a batch " + + "operation and should not be consumed."; + return raw ? { status: 0, json: msg, headers: new Headers() } : msg; + } + const uri = this.remote + addEndpointOptions(request.path, options); + const result = await this.http.request( + uri, + cleanUndefinedProperties({ + // Limit requests to only those parts that would be allowed in + // a batch request -- don't pass through other fancy fetch() + // options like integrity, redirect, mode because they will + // break on a batch request. A batch request only allows + // headers, method, path (above), and body. + method: request.method, + headers: request.headers, + body: stringify ? JSON.stringify(request.body) : request.body, + }), + { retry: this._getRetry(options) } + ); + return raw ? 
result : result.json; + } + /** + * Perform an operation with a given HTTP method on some pages from + * a paginated list, following the `next-page` header automatically + * until we have processed the requested number of pages. Return a + * response with a `.next()` method that can be called to perform + * the requested HTTP method on more results. + * + * @private + * @param {String} path + * The path to make the request to. + * @param {Object} params + * The parameters to use when making the request. + * @param {String} [params.sort="-last_modified"] + * The sorting order to use when doing operation on pages. + * @param {Object} [params.filters={}] + * The filters to send in the request. + * @param {Number} [params.limit=undefined] + * The limit to send in the request. Undefined means no limit. + * @param {Number} [params.pages=undefined] + * The number of pages to operate on. Undefined means one page. Pass + * Infinity to operate on everything. + * @param {String} [params.since=undefined] + * The ETag from which to start doing operation on pages. + * @param {Array} [params.fields] + * Limit response to just some fields. + * @param {Object} [options={}] + * Additional request-level parameters to use in all requests. + * @param {Object} [options.headers={}] + * Headers to use during all requests. + * @param {Number} [options.retry=0] + * Number of times to retry each request if the server responds + * with Retry-After. + * @param {String} [options.method="GET"] + * The method to use in the request. + */ + async paginatedOperation(path, params = {}, options = {}) { + // FIXME: this is called even in batch requests, which doesn't + // make any sense (since all batch requests get a "dummy" + // response; see execute() above). + const { sort, filters, limit, pages, since, fields } = { + sort: "-last_modified", + ...params, + }; + // Safety/Consistency check on ETag value. 
+ if (since && typeof since !== "string") { + throw new Error( + `Invalid value for since (${since}), should be ETag value.` + ); + } + const query = { + ...filters, + _sort: sort, + _limit: limit, + _since: since, + }; + if (fields) { + query._fields = fields; + } + const querystring = qsify(query); + let results = [], + current = 0; + const next = async function (nextPage) { + if (!nextPage) { + throw new Error("Pagination exhausted."); + } + return processNextPage(nextPage); + }; + const processNextPage = async nextPage => { + const { headers } = options; + return handleResponse(await this.http.request(nextPage, { headers })); + }; + const pageResults = (results, nextPage, etag) => { + // ETag string is supposed to be opaque and stored «as-is». + // ETag header values are quoted (because of * and W/"foo"). + return { + last_modified: etag ? etag.replace(/"/g, "") : etag, + data: results, + next: next.bind(null, nextPage), + hasNextPage: !!nextPage, + totalRecords: -1, + }; + }; + const handleResponse = async function ({ + headers = new Headers(), + json = {}, + }) { + const nextPage = headers.get("Next-Page"); + const etag = headers.get("ETag"); + if (!pages) { + return pageResults(json.data, nextPage, etag); + } + // Aggregate new results with previous ones + results = results.concat(json.data); + current += 1; + if (current >= pages || !nextPage) { + // Pagination exhausted + return pageResults(results, nextPage, etag); + } + // Follow next page + return processNextPage(nextPage); + }; + return handleResponse( + await this.execute( + // N.B.: This doesn't use _getHeaders, because all calls to + // `paginatedList` are assumed to come from calls that already + // have headers merged at e.g. the bucket or collection level. + { + headers: options.headers ? options.headers : {}, + path: path + "?" + querystring, + method: options.method, + }, + // N.B. 
This doesn't use _getRetry, because all calls to + // `paginatedList` are assumed to come from calls that already + // used `_getRetry` at e.g. the bucket or collection level. + { raw: true, retry: options.retry || 0 } + ) + ); + } + /** + * Fetch some pages from a paginated list, following the `next-page` + * header automatically until we have fetched the requested number + * of pages. Return a response with a `.next()` method that can be + * called to fetch more results. + * + * @private + * @param {String} path + * The path to make the request to. + * @param {Object} params + * The parameters to use when making the request. + * @param {String} [params.sort="-last_modified"] + * The sorting order to use when fetching. + * @param {Object} [params.filters={}] + * The filters to send in the request. + * @param {Number} [params.limit=undefined] + * The limit to send in the request. Undefined means no limit. + * @param {Number} [params.pages=undefined] + * The number of pages to fetch. Undefined means one page. Pass + * Infinity to fetch everything. + * @param {String} [params.since=undefined] + * The ETag from which to start fetching. + * @param {Array} [params.fields] + * Limit response to just some fields. + * @param {Object} [options={}] + * Additional request-level parameters to use in all requests. + * @param {Object} [options.headers={}] + * Headers to use during all requests. + * @param {Number} [options.retry=0] + * Number of times to retry each request if the server responds + * with Retry-After. + */ + async paginatedList(path, params = {}, options = {}) { + return this.paginatedOperation(path, params, options); + } + /** + * Delete multiple objects, following the pagination if the number of + * objects exceeds the page limit until we have deleted the requested + * number of pages. Return a response with a `.next()` method that can + * be called to delete more results. + * + * @private + * @param {String} path + * The path to make the request to. 
+ * @param {Object} params + * The parameters to use when making the request. + * @param {String} [params.sort="-last_modified"] + * The sorting order to use when deleting. + * @param {Object} [params.filters={}] + * The filters to send in the request. + * @param {Number} [params.limit=undefined] + * The limit to send in the request. Undefined means no limit. + * @param {Number} [params.pages=undefined] + * The number of pages to delete. Undefined means one page. Pass + * Infinity to delete everything. + * @param {String} [params.since=undefined] + * The ETag from which to start deleting. + * @param {Array} [params.fields] + * Limit response to just some fields. + * @param {Object} [options={}] + * Additional request-level parameters to use in all requests. + * @param {Object} [options.headers={}] + * Headers to use during all requests. + * @param {Number} [options.retry=0] + * Number of times to retry each request if the server responds + * with Retry-After. + */ + paginatedDelete(path, params = {}, options = {}) { + const { headers, safe, last_modified } = options; + const deleteRequest$1 = deleteRequest(path, { + headers, + safe: safe ? safe : false, + last_modified, + }); + return this.paginatedOperation(path, params, { + ...options, + headers: deleteRequest$1.headers, + method: "DELETE", + }); + } + /** + * Lists all permissions. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers={}] Headers to use when making + * this request. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @return {Promise<Object[], Error>} + */ + async listPermissions(options = {}) { + const path = ENDPOINTS.permissions(); + // Ensure the default sort parameter is something that exists in permissions + // entries, as `last_modified` doesn't; here, we pick "id". 
+ const paginationOptions = { sort: "id", ...options }; + return this.paginatedList(path, paginationOptions, { + headers: this._getHeaders(options), + retry: this._getRetry(options), + }); + } + /** + * Retrieves the list of buckets. + * + * @param {Object} [options={}] The options object. + * @param {Object} [options.headers={}] Headers to use when making + * this request. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Object} [options.filters={}] The filters object. + * @param {Array} [options.fields] Limit response to + * just some fields. + * @return {Promise<Object[], Error>} + */ + async listBuckets(options = {}) { + const path = ENDPOINTS.bucket(); + return this.paginatedList(path, options, { + headers: this._getHeaders(options), + retry: this._getRetry(options), + }); + } + /** + * Creates a new bucket on the server. + * + * @param {String|null} id The bucket name (optional). + * @param {Object} [options={}] The options object. + * @param {Boolean} [options.data] The bucket data option. + * @param {Boolean} [options.safe] The safe option. + * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @return {Promise<Object, Error>} + */ + async createBucket(id, options = {}) { + const { data, permissions } = options; + const _data = { ...data, id: id ? id : undefined }; + const path = _data.id ? ENDPOINTS.bucket(_data.id) : ENDPOINTS.bucket(); + return this.execute( + createRequest( + path, + { data: _data, permissions }, + { + headers: this._getHeaders(options), + safe: this._getSafe(options), + } + ), + { retry: this._getRetry(options) } + ); + } + /** + * Deletes a bucket from the server. + * + * @ignore + * @param {Object|String} bucket The bucket to delete. + * @param {Object} [options={}] The options object. + * @param {Boolean} [options.safe] The safe option. 
+ * @param {Object} [options.headers] The headers object option. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Number} [options.last_modified] The last_modified option. + * @return {Promise<Object, Error>} + */ + async deleteBucket(bucket, options = {}) { + const bucketObj = toDataBody(bucket); + if (!bucketObj.id) { + throw new Error("A bucket id is required."); + } + const path = ENDPOINTS.bucket(bucketObj.id); + const { last_modified } = { ...bucketObj, ...options }; + return this.execute( + deleteRequest(path, { + last_modified, + headers: this._getHeaders(options), + safe: this._getSafe(options), + }), + { retry: this._getRetry(options) } + ); + } + /** + * Deletes buckets. + * + * @param {Object} [options={}] The options object. + * @param {Boolean} [options.safe] The safe option. + * @param {Object} [options.headers={}] Headers to use when making + * this request. + * @param {Number} [options.retry=0] Number of retries to make + * when faced with transient errors. + * @param {Object} [options.filters={}] The filters object. + * @param {Array} [options.fields] Limit response to + * just some fields. + * @param {Number} [options.last_modified] The last_modified option. 
+ * @return {Promise<Object[], Error>} + */ + async deleteBuckets(options = {}) { + const path = ENDPOINTS.bucket(); + return this.paginatedDelete(path, options, { + headers: this._getHeaders(options), + retry: this._getRetry(options), + safe: options.safe, + last_modified: options.last_modified, + }); + } + async createAccount(username, password) { + return this.execute( + createRequest( + `/accounts/${username}`, + { data: { password } }, + { method: "PUT" } + ) + ); + } +} +__decorate( + [nobatch("This operation is not supported within a batch operation.")], + KintoClientBase.prototype, + "fetchServerSettings", + null +); +__decorate( + [nobatch("This operation is not supported within a batch operation.")], + KintoClientBase.prototype, + "fetchServerCapabilities", + null +); +__decorate( + [nobatch("This operation is not supported within a batch operation.")], + KintoClientBase.prototype, + "fetchUser", + null +); +__decorate( + [nobatch("This operation is not supported within a batch operation.")], + KintoClientBase.prototype, + "fetchHTTPApiVersion", + null +); +__decorate( + [nobatch("Can't use batch within a batch!")], + KintoClientBase.prototype, + "batch", + null +); +__decorate( + [capable(["permissions_endpoint"])], + KintoClientBase.prototype, + "listPermissions", + null +); +__decorate( + [support("1.4", "2.0")], + KintoClientBase.prototype, + "deleteBuckets", + null +); +__decorate( + [capable(["accounts"])], + KintoClientBase.prototype, + "createAccount", + null +); + +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +/* @ts-ignore */ +class KintoHttpClient extends KintoClientBase { + constructor(remote, options = {}) { + const events = {}; + EventEmitter.decorate(events); + super(remote, { events: events, ...options }); + } +} +KintoHttpClient.errors = errors; + +export { KintoHttpClient }; diff --git a/services/common/kinto-offline-client.js b/services/common/kinto-offline-client.js new file mode 100644 index 0000000000..7b11347555 --- /dev/null +++ b/services/common/kinto-offline-client.js @@ -0,0 +1,2643 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +"use strict"; + +/* + * This file is generated from kinto.js - do not modify directly. + */ + +// This is required because with Babel compiles ES2015 modules into a +// require() form that tries to keep its modules on "this", but +// doesn't specify "this", leaving it to default to the global +// object. However, in strict mode, "this" no longer defaults to the +// global object, so expose the global object explicitly. Babel's +// compiled output will use a variable called "global" if one is +// present. +// +// See https://bugzilla.mozilla.org/show_bug.cgi?id=1394556#c3 for +// more details. +const global = this; + +var EXPORTED_SYMBOLS = ["Kinto"]; + +/* + * Version 13.0.0 - 7fbf95d + */ + +(function (global, factory) { + typeof exports === 'object' && typeof module !== 'undefined' ? 
module.exports = factory() : + typeof define === 'function' && define.amd ? define(factory) : + (global = global || self, global.Kinto = factory()); +}(this, (function () { 'use strict'; + + /** + * Base db adapter. + * + * @abstract + */ + class BaseAdapter { + /** + * Deletes every records present in the database. + * + * @abstract + * @return {Promise} + */ + clear() { + throw new Error("Not Implemented."); + } + /** + * Executes a batch of operations within a single transaction. + * + * @abstract + * @param {Function} callback The operation callback. + * @param {Object} options The options object. + * @return {Promise} + */ + execute(callback, options = { preload: [] }) { + throw new Error("Not Implemented."); + } + /** + * Retrieve a record by its primary key from the database. + * + * @abstract + * @param {String} id The record id. + * @return {Promise} + */ + get(id) { + throw new Error("Not Implemented."); + } + /** + * Lists all records from the database. + * + * @abstract + * @param {Object} params The filters and order to apply to the results. + * @return {Promise} + */ + list(params = { filters: {}, order: "" }) { + throw new Error("Not Implemented."); + } + /** + * Store the lastModified value. + * + * @abstract + * @param {Number} lastModified + * @return {Promise} + */ + saveLastModified(lastModified) { + throw new Error("Not Implemented."); + } + /** + * Retrieve saved lastModified value. + * + * @abstract + * @return {Promise} + */ + getLastModified() { + throw new Error("Not Implemented."); + } + /** + * Load records in bulk that were exported from a server. + * + * @abstract + * @param {Array} records The records to load. + * @return {Promise} + */ + importBulk(records) { + throw new Error("Not Implemented."); + } + /** + * Load a dump of records exported from a server. + * + * @deprecated Use {@link importBulk} instead. + * @abstract + * @param {Array} records The records to load. 
+ * @return {Promise} + */ + loadDump(records) { + throw new Error("Not Implemented."); + } + saveMetadata(metadata) { + throw new Error("Not Implemented."); + } + getMetadata() { + throw new Error("Not Implemented."); + } + } + + const RE_RECORD_ID = /^[a-zA-Z0-9][a-zA-Z0-9_-]*$/; + /** + * Checks if a value is undefined. + * @param {Any} value + * @return {Boolean} + */ + function _isUndefined(value) { + return typeof value === "undefined"; + } + /** + * Sorts records in a list according to a given ordering. + * + * @param {String} order The ordering, eg. `-last_modified`. + * @param {Array} list The collection to order. + * @return {Array} + */ + function sortObjects(order, list) { + const hasDash = order[0] === "-"; + const field = hasDash ? order.slice(1) : order; + const direction = hasDash ? -1 : 1; + return list.slice().sort((a, b) => { + if (a[field] && _isUndefined(b[field])) { + return direction; + } + if (b[field] && _isUndefined(a[field])) { + return -direction; + } + if (_isUndefined(a[field]) && _isUndefined(b[field])) { + return 0; + } + return a[field] > b[field] ? direction : -direction; + }); + } + /** + * Test if a single object matches all given filters. + * + * @param {Object} filters The filters object. + * @param {Object} entry The object to filter. + * @return {Boolean} + */ + function filterObject(filters, entry) { + return Object.keys(filters).every(filter => { + const value = filters[filter]; + if (Array.isArray(value)) { + return value.some(candidate => candidate === entry[filter]); + } + else if (typeof value === "object") { + return filterObject(value, entry[filter]); + } + else if (!Object.prototype.hasOwnProperty.call(entry, filter)) { + console.error(`The property ${filter} does not exist`); + return false; + } + return entry[filter] === value; + }); + } + /** + * Resolves a list of functions sequentially, which can be sync or async; in + * case of async, functions must return a promise. 
+ * + * @param {Array} fns The list of functions. + * @param {Any} init The initial value. + * @return {Promise} + */ + function waterfall(fns, init) { + if (!fns.length) { + return Promise.resolve(init); + } + return fns.reduce((promise, nextFn) => { + return promise.then(nextFn); + }, Promise.resolve(init)); + } + /** + * Simple deep object comparison function. This only supports comparison of + * serializable JavaScript objects. + * + * @param {Object} a The source object. + * @param {Object} b The compared object. + * @return {Boolean} + */ + function deepEqual(a, b) { + if (a === b) { + return true; + } + if (typeof a !== typeof b) { + return false; + } + if (!(a && typeof a == "object") || !(b && typeof b == "object")) { + return false; + } + if (Object.keys(a).length !== Object.keys(b).length) { + return false; + } + for (const k in a) { + if (!deepEqual(a[k], b[k])) { + return false; + } + } + return true; + } + /** + * Return an object without the specified keys. + * + * @param {Object} obj The original object. + * @param {Array} keys The list of keys to exclude. + * @return {Object} A copy without the specified keys. 
+ */ + function omitKeys(obj, keys = []) { + const result = Object.assign({}, obj); + for (const key of keys) { + delete result[key]; + } + return result; + } + function arrayEqual(a, b) { + if (a.length !== b.length) { + return false; + } + for (let i = a.length; i--;) { + if (a[i] !== b[i]) { + return false; + } + } + return true; + } + function makeNestedObjectFromArr(arr, val, nestedFiltersObj) { + const last = arr.length - 1; + return arr.reduce((acc, cv, i) => { + if (i === last) { + return (acc[cv] = val); + } + else if (Object.prototype.hasOwnProperty.call(acc, cv)) { + return acc[cv]; + } + else { + return (acc[cv] = {}); + } + }, nestedFiltersObj); + } + function transformSubObjectFilters(filtersObj) { + const transformedFilters = {}; + for (const key in filtersObj) { + const keysArr = key.split("."); + const val = filtersObj[key]; + makeNestedObjectFromArr(keysArr, val, transformedFilters); + } + return transformedFilters; + } + + const INDEXED_FIELDS = ["id", "_status", "last_modified"]; + /** + * Small helper that wraps the opening of an IndexedDB into a Promise. + * + * @param dbname {String} The database name. + * @param version {Integer} Schema version + * @param onupgradeneeded {Function} The callback to execute if schema is + * missing or different. + * @return {Promise<IDBDatabase>} + */ + async function open(dbname, { version, onupgradeneeded }) { + return new Promise((resolve, reject) => { + const request = indexedDB.open(dbname, version); + request.onupgradeneeded = event => { + const db = event.target.result; + db.onerror = event => reject(event.target.error); + // When an upgrade is needed, a transaction is started. + const transaction = event.target.transaction; + transaction.onabort = event => { + const error = event.target.error || + transaction.error || + new DOMException("The operation has been aborted", "AbortError"); + reject(error); + }; + // Callback for store creation etc. 
+ return onupgradeneeded(event); + }; + request.onerror = event => { + reject(event.target.error); + }; + request.onsuccess = event => { + const db = event.target.result; + resolve(db); + }; + }); + } + /** + * Helper to run the specified callback in a single transaction on the + * specified store. + * The helper focuses on transaction wrapping into a promise. + * + * @param db {IDBDatabase} The database instance. + * @param name {String} The store name. + * @param callback {Function} The piece of code to execute in the transaction. + * @param options {Object} Options. + * @param options.mode {String} Transaction mode (default: read). + * @return {Promise} any value returned by the callback. + */ + async function execute(db, name, callback, options = {}) { + const { mode } = options; + return new Promise((resolve, reject) => { + // On Safari, calling IDBDatabase.transaction with mode == undefined raises + // a TypeError. + const transaction = mode + ? db.transaction([name], mode) + : db.transaction([name]); + const store = transaction.objectStore(name); + // Let the callback abort this transaction. + const abort = e => { + transaction.abort(); + reject(e); + }; + // Execute the specified callback **synchronously**. + let result; + try { + result = callback(store, abort); + } + catch (e) { + abort(e); + } + transaction.onerror = event => reject(event.target.error); + transaction.oncomplete = event => resolve(result); + transaction.onabort = event => { + const error = event.target.error || + transaction.error || + new DOMException("The operation has been aborted", "AbortError"); + reject(error); + }; + }); + } + /** + * Helper to wrap the deletion of an IndexedDB database into a promise. 
+ * + * @param dbName {String} the database to delete + * @return {Promise} + */ + async function deleteDatabase(dbName) { + return new Promise((resolve, reject) => { + const request = indexedDB.deleteDatabase(dbName); + request.onsuccess = event => resolve(event.target); + request.onerror = event => reject(event.target.error); + }); + } + /** + * IDB cursor handlers. + * @type {Object} + */ + const cursorHandlers = { + all(filters, done) { + const results = []; + return event => { + const cursor = event.target.result; + if (cursor) { + const { value } = cursor; + if (filterObject(filters, value)) { + results.push(value); + } + cursor.continue(); + } + else { + done(results); + } + }; + }, + in(values, filters, done) { + const results = []; + let i = 0; + return function (event) { + const cursor = event.target.result; + if (!cursor) { + done(results); + return; + } + const { key, value } = cursor; + // `key` can be an array of two values (see `keyPath` in indices definitions). + // `values` can be an array of arrays if we filter using an index whose key path + // is an array (eg. `cursorHandlers.in([["bid/cid", 42], ["bid/cid", 43]], ...)`) + while (key > values[i]) { + // The cursor has passed beyond this key. Check next. + ++i; + if (i === values.length) { + done(results); // There is no next. Stop searching. + return; + } + } + const isEqual = Array.isArray(key) + ? arrayEqual(key, values[i]) + : key === values[i]; + if (isEqual) { + if (filterObject(filters, value)) { + results.push(value); + } + cursor.continue(); + } + else { + cursor.continue(values[i]); + } + }; + }, + }; + /** + * Creates an IDB request and attach it the appropriate cursor event handler to + * perform a list query. + * + * Multiple matching values are handled by passing an array. + * + * @param {String} cid The collection id (ie. `{bid}/{cid}`) + * @param {IDBStore} store The IDB store. + * @param {Object} filters Filter the records by field. 
+ * @param {Function} done The operation completion handler. + * @return {IDBRequest} + */ + function createListRequest(cid, store, filters, done) { + const filterFields = Object.keys(filters); + // If no filters, get all results in one bulk. + if (filterFields.length == 0) { + const request = store.index("cid").getAll(IDBKeyRange.only(cid)); + request.onsuccess = event => done(event.target.result); + return request; + } + // Introspect filters and check if they leverage an indexed field. + const indexField = filterFields.find(field => { + return INDEXED_FIELDS.includes(field); + }); + if (!indexField) { + // Iterate on all records for this collection (ie. cid) + const isSubQuery = Object.keys(filters).some(key => key.includes(".")); // (ie. filters: {"article.title": "hello"}) + if (isSubQuery) { + const newFilter = transformSubObjectFilters(filters); + const request = store.index("cid").openCursor(IDBKeyRange.only(cid)); + request.onsuccess = cursorHandlers.all(newFilter, done); + return request; + } + const request = store.index("cid").openCursor(IDBKeyRange.only(cid)); + request.onsuccess = cursorHandlers.all(filters, done); + return request; + } + // If `indexField` was used already, don't filter again. + const remainingFilters = omitKeys(filters, [indexField]); + // value specified in the filter (eg. `filters: { _status: ["created", "updated"] }`) + const value = filters[indexField]; + // For the "id" field, use the primary key. + const indexStore = indexField == "id" ? store : store.index(indexField); + // WHERE IN equivalent clause + if (Array.isArray(value)) { + if (value.length === 0) { + return done([]); + } + const values = value.map(i => [cid, i]).sort(); + const range = IDBKeyRange.bound(values[0], values[values.length - 1]); + const request = indexStore.openCursor(range); + request.onsuccess = cursorHandlers.in(values, remainingFilters, done); + return request; + } + // If no filters on custom attribute, get all results in one bulk. 
+ // NOTE(review): `remainingFilters` is a plain object (built by omitKeys()
+ // above), so `.length` is undefined and this getAll() fast path can never
+ // be taken. Results stay correct via the cursor path below, just slower.
+ // Presumably this was meant to be `Object.keys(remainingFilters).length`
+ // — confirm against upstream kinto.js before changing generated code.
+ if (remainingFilters.length == 0) {
+ const request = indexStore.getAll(IDBKeyRange.only([cid, value]));
+ request.onsuccess = event => done(event.target.result);
+ return request;
+ }
+ // WHERE field = value clause
+ const request = indexStore.openCursor(IDBKeyRange.only([cid, value]));
+ request.onsuccess = cursorHandlers.all(remainingFilters, done);
+ return request;
+ }
+ // Error wrapper that prefixes the failing adapter method name while
+ // preserving the original error's name and stack for diagnostics.
+ class IDBError extends Error {
+ constructor(method, err) {
+ super(`IndexedDB ${method}() ${err.message}`);
+ this.name = err.name;
+ this.stack = err.stack;
+ }
+ }
+ /**
+ * IndexedDB adapter.
+ *
+ * This adapter doesn't support any options.
+ */
+ class IDB extends BaseAdapter {
+ /* Expose the IDBError class publicly */
+ static get IDBError() {
+ return IDBError;
+ }
+ /**
+ * Constructor.
+ *
+ * @param {String} cid The key base for this collection (eg. `bid/cid`)
+ * @param {Object} options
+ * @param {String} options.dbName The IndexedDB name (default: `"KintoDB"`)
+ * @param {Boolean} options.migrateOldData Whether old database data should be migrated (default: `false`)
+ */
+ constructor(cid, options = {}) {
+ super();
+ this.cid = cid;
+ this.dbName = options.dbName || "KintoDB";
+ this._options = options;
+ this._db = null;
+ }
+ // Rethrows any adapter failure wrapped in IDBError (see class above).
+ _handleError(method, err) {
+ throw new IDBError(method, err);
+ }
+ /**
+ * Ensures a connection to the IndexedDB database has been opened.
+ *
+ * @override
+ * @return {Promise}
+ */
+ async open() {
+ if (this._db) {
+ return this;
+ }
+ // In previous versions, we used to have a database with name `${bid}/${cid}`.
+ // Check if it exists, and migrate data once new schema is in place.
+ // Note: the built-in migrations from IndexedDB can only be used if the
+ // database name does not change.
+ const dataToMigrate = this._options.migrateOldData
+ ? 
await migrationRequired(this.cid) + : null; + this._db = await open(this.dbName, { + version: 2, + onupgradeneeded: event => { + const db = event.target.result; + if (event.oldVersion < 1) { + // Records store + const recordsStore = db.createObjectStore("records", { + keyPath: ["_cid", "id"], + }); + // An index to obtain all the records in a collection. + recordsStore.createIndex("cid", "_cid"); + // Here we create indices for every known field in records by collection. + // Local record status ("synced", "created", "updated", "deleted") + recordsStore.createIndex("_status", ["_cid", "_status"]); + // Last modified field + recordsStore.createIndex("last_modified", ["_cid", "last_modified"]); + // Timestamps store + db.createObjectStore("timestamps", { + keyPath: "cid", + }); + } + if (event.oldVersion < 2) { + // Collections store + db.createObjectStore("collections", { + keyPath: "cid", + }); + } + }, + }); + if (dataToMigrate) { + const { records, timestamp } = dataToMigrate; + await this.importBulk(records); + await this.saveLastModified(timestamp); + console.log(`${this.cid}: data was migrated successfully.`); + // Delete the old database. + await deleteDatabase(this.cid); + console.warn(`${this.cid}: old database was deleted.`); + } + return this; + } + /** + * Closes current connection to the database. + * + * @override + * @return {Promise} + */ + close() { + if (this._db) { + this._db.close(); // indexedDB.close is synchronous + this._db = null; + } + return Promise.resolve(); + } + /** + * Returns a transaction and an object store for a store name. + * + * To determine if a transaction has completed successfully, we should rather + * listen to the transaction’s complete event rather than the IDBObjectStore + * request’s success event, because the transaction may still fail after the + * success event fires. 
+ * + * @param {String} name Store name + * @param {Function} callback to execute + * @param {Object} options Options + * @param {String} options.mode Transaction mode ("readwrite" or undefined) + * @return {Object} + */ + async prepare(name, callback, options) { + await this.open(); + await execute(this._db, name, callback, options); + } + /** + * Deletes every records in the current collection. + * + * @override + * @return {Promise} + */ + async clear() { + try { + await this.prepare("records", store => { + const range = IDBKeyRange.only(this.cid); + const request = store.index("cid").openKeyCursor(range); + request.onsuccess = event => { + const cursor = event.target.result; + if (cursor) { + store.delete(cursor.primaryKey); + cursor.continue(); + } + }; + return request; + }, { mode: "readwrite" }); + } + catch (e) { + this._handleError("clear", e); + } + } + /** + * Executes the set of synchronous CRUD operations described in the provided + * callback within an IndexedDB transaction, for current db store. + * + * The callback will be provided an object exposing the following synchronous + * CRUD operation methods: get, create, update, delete. + * + * Important note: because limitations in IndexedDB implementations, no + * asynchronous code should be performed within the provided callback; the + * promise will therefore be rejected if the callback returns a Promise. + * + * Options: + * - {Array} preload: The list of record IDs to fetch and make available to + * the transaction object get() method (default: []) + * + * @example + * const db = new IDB("example"); + * const result = await db.execute(transaction => { + * transaction.create({id: 1, title: "foo"}); + * transaction.update({id: 2, title: "bar"}); + * transaction.delete(3); + * return "foo"; + * }); + * + * @override + * @param {Function} callback The operation description callback. + * @param {Object} options The options object. 
+ * @return {Promise}
+ */
+ async execute(callback, options = { preload: [] }) {
+ // Transactions in IndexedDB are autocommitted when a callback does not
+ // perform any additional operation.
+ // The way Promises are implemented in Firefox (see https://bugzilla.mozilla.org/show_bug.cgi?id=1193394)
+ // prevents using within an opened transaction.
+ // To avoid managing asynchronicity in the specified `callback`, we preload
+ // a list of records in order to execute the `callback` synchronously.
+ // See also:
+ // - http://stackoverflow.com/a/28388805/330911
+ // - http://stackoverflow.com/a/10405196
+ // - https://jakearchibald.com/2015/tasks-microtasks-queues-and-schedules/
+ let result;
+ await this.prepare("records", (store, abort) => {
+ // NOTE(review): `preloaded` defaults to [] but is consumed as an
+ // id-keyed map by transactionProxy's get(); lookups on the empty
+ // array simply return undefined, which matches "no preload".
+ const runCallback = (preloaded = []) => {
+ // Expose a consistent API for every adapter instead of raw store methods.
+ const proxy = transactionProxy(this, store, preloaded);
+ // The callback is executed synchronously within the same transaction.
+ try {
+ const returned = callback(proxy);
+ if (returned instanceof Promise) {
+ // XXX: investigate how to provide documentation details in error.
+ throw new Error("execute() callback should not return a Promise.");
+ }
+ // Bring to scope that will be returned (once promise awaited).
+ result = returned;
+ }
+ catch (e) {
+ // The callback has thrown an error explicitly. Abort transaction cleanly.
+ abort(e);
+ }
+ };
+ // No option to preload records, go straight to `callback`.
+ if (!options.preload.length) {
+ return runCallback();
+ }
+ // Preload specified records using a list request.
+ const filters = { id: options.preload };
+ createListRequest(this.cid, store, filters, records => {
+ // Store obtained records by id. 
+ const preloaded = {}; + for (const record of records) { + delete record["_cid"]; + preloaded[record.id] = record; + } + runCallback(preloaded); + }); + }, { mode: "readwrite" }); + return result; + } + /** + * Retrieve a record by its primary key from the IndexedDB database. + * + * @override + * @param {String} id The record id. + * @return {Promise} + */ + async get(id) { + try { + let record; + await this.prepare("records", store => { + store.get([this.cid, id]).onsuccess = e => (record = e.target.result); + }); + return record; + } + catch (e) { + this._handleError("get", e); + } + } + /** + * Lists all records from the IndexedDB database. + * + * @override + * @param {Object} params The filters and order to apply to the results. + * @return {Promise} + */ + async list(params = { filters: {} }) { + const { filters } = params; + try { + let results = []; + await this.prepare("records", store => { + createListRequest(this.cid, store, filters, _results => { + // we have received all requested records that match the filters, + // we now park them within current scope and hide the `_cid` attribute. + for (const result of _results) { + delete result["_cid"]; + } + results = _results; + }); + }); + // The resulting list of records is sorted. + // XXX: with some efforts, this could be fully implemented using IDB API. + return params.order ? sortObjects(params.order, results) : results; + } + catch (e) { + this._handleError("list", e); + } + } + /** + * Store the lastModified value into metadata store. 
+ * + * @override + * @param {Number} lastModified + * @return {Promise} + */ + async saveLastModified(lastModified) { + const value = parseInt(lastModified, 10) || null; + try { + await this.prepare("timestamps", store => { + if (value === null) { + store.delete(this.cid); + } + else { + store.put({ cid: this.cid, value }); + } + }, { mode: "readwrite" }); + return value; + } + catch (e) { + this._handleError("saveLastModified", e); + } + } + /** + * Retrieve saved lastModified value. + * + * @override + * @return {Promise} + */ + async getLastModified() { + try { + let entry = null; + await this.prepare("timestamps", store => { + store.get(this.cid).onsuccess = e => (entry = e.target.result); + }); + return entry ? entry.value : null; + } + catch (e) { + this._handleError("getLastModified", e); + } + } + /** + * Load a dump of records exported from a server. + * + * @deprecated Use {@link importBulk} instead. + * @abstract + * @param {Array} records The records to load. + * @return {Promise} + */ + async loadDump(records) { + return this.importBulk(records); + } + /** + * Load records in bulk that were exported from a server. + * + * @abstract + * @param {Array} records The records to load. + * @return {Promise} + */ + async importBulk(records) { + try { + await this.execute(transaction => { + // Since the put operations are asynchronous, we chain + // them together. The last one will be waited for the + // `transaction.oncomplete` callback. (see #execute()) + let i = 0; + putNext(); + function putNext() { + if (i == records.length) { + return; + } + // On error, `transaction.onerror` is called. 
+ transaction.update(records[i]).onsuccess = putNext; + ++i; + } + }); + const previousLastModified = await this.getLastModified(); + const lastModified = Math.max(...records.map(record => record.last_modified)); + if (lastModified > previousLastModified) { + await this.saveLastModified(lastModified); + } + return records; + } + catch (e) { + this._handleError("importBulk", e); + } + } + async saveMetadata(metadata) { + try { + await this.prepare("collections", store => store.put({ cid: this.cid, metadata }), { mode: "readwrite" }); + return metadata; + } + catch (e) { + this._handleError("saveMetadata", e); + } + } + async getMetadata() { + try { + let entry = null; + await this.prepare("collections", store => { + store.get(this.cid).onsuccess = e => (entry = e.target.result); + }); + return entry ? entry.metadata : null; + } + catch (e) { + this._handleError("getMetadata", e); + } + } + } + /** + * IDB transaction proxy. + * + * @param {IDB} adapter The call IDB adapter + * @param {IDBStore} store The IndexedDB database store. + * @param {Array} preloaded The list of records to make available to + * get() (default: []). + * @return {Object} + */ + function transactionProxy(adapter, store, preloaded = []) { + const _cid = adapter.cid; + return { + create(record) { + store.add(Object.assign(Object.assign({}, record), { _cid })); + }, + update(record) { + return store.put(Object.assign(Object.assign({}, record), { _cid })); + }, + delete(id) { + store.delete([_cid, id]); + }, + get(id) { + return preloaded[id]; + }, + }; + } + /** + * Up to version 10.X of kinto.js, each collection had its own collection. + * The database name was `${bid}/${cid}` (eg. `"blocklists/certificates"`) + * and contained only one store with the same name. 
+ */
+ async function migrationRequired(dbName) {
+ let exists = true;
+ const db = await open(dbName, {
+ version: 1,
+ onupgradeneeded: event => {
+ exists = false;
+ },
+ });
+ // Check that the DB we're looking at is really a legacy one,
+ // and not some remainder of the open() operation above.
+ // NOTE(review): `&=` coerces the boolean into a number (1/0). The
+ // `!exists` test below still behaves correctly, but a plain
+ // `exists = exists && …` would keep the type boolean.
+ exists &=
+ db.objectStoreNames.contains("__meta__") &&
+ db.objectStoreNames.contains(dbName);
+ if (!exists) {
+ db.close();
+ // Testing the existence creates it, so delete it :)
+ await deleteDatabase(dbName);
+ return null;
+ }
+ console.warn(`${dbName}: old IndexedDB database found.`);
+ try {
+ // Scan all records.
+ let records;
+ await execute(db, dbName, store => {
+ store.openCursor().onsuccess = cursorHandlers.all({}, res => (records = res));
+ });
+ console.log(`${dbName}: found ${records.length} records.`);
+ // Check if there's an entry for this.
+ let timestamp = null;
+ await execute(db, "__meta__", store => {
+ store.get(`${dbName}-lastModified`).onsuccess = e => {
+ timestamp = e.target.result ? e.target.result.value : null;
+ };
+ });
+ // Some previous versions also used to store the timestamps without prefix.
+ if (!timestamp) {
+ await execute(db, "__meta__", store => {
+ store.get("lastModified").onsuccess = e => {
+ timestamp = e.target.result ? e.target.result.value : null;
+ };
+ });
+ }
+ console.log(`${dbName}: ${timestamp ? "found" : "no"} timestamp.`);
+ // Those will be inserted in the new database/schema.
+ return { records, timestamp };
+ }
+ catch (e) {
+ console.error("Error occured during migration", e);
+ return null;
+ }
+ finally {
+ db.close();
+ }
+ }
+
+ // NOTE(review): stub left by the bundling step — the uuid module is
+ // replaced by an empty object, yet createUUIDSchema() below calls
+ // `uuid4()`, which would throw TypeError if ever invoked. Presumably
+ // the embedder always supplies its own `idSchema` so this path never
+ // runs — confirm before relying on the default schema.
+ var uuid4 = {};
+
+ const RECORD_FIELDS_TO_CLEAN = ["_status"];
+ const AVAILABLE_HOOKS = ["incoming-changes"];
+ const IMPORT_CHUNK_SIZE = 200;
+ /**
+ * Compare two records omitting local fields and synchronization
+ * attributes (like _status and last_modified)
+ * @param {Object} a A record to compare.
+ * @param {Object} b A record to compare.
+ * @param {Array} localFields Additional fields to ignore during the comparison + * @return {boolean} + */ + function recordsEqual(a, b, localFields = []) { + const fieldsToClean = RECORD_FIELDS_TO_CLEAN.concat(["last_modified"]).concat(localFields); + const cleanLocal = r => omitKeys(r, fieldsToClean); + return deepEqual(cleanLocal(a), cleanLocal(b)); + } + /** + * Synchronization result object. + */ + class SyncResultObject { + /** + * Public constructor. + */ + constructor() { + /** + * Current synchronization result status; becomes `false` when conflicts or + * errors are registered. + * @type {Boolean} + */ + this.lastModified = null; + this._lists = {}; + [ + "errors", + "created", + "updated", + "deleted", + "published", + "conflicts", + "skipped", + "resolved", + "void", + ].forEach(l => (this._lists[l] = [])); + this._cached = {}; + } + /** + * Adds entries for a given result type. + * + * @param {String} type The result type. + * @param {Array} entries The result entries. + * @return {SyncResultObject} + */ + add(type, entries) { + if (!Array.isArray(this._lists[type])) { + console.warn(`Unknown type "${type}"`); + return; + } + if (!Array.isArray(entries)) { + entries = [entries]; + } + this._lists[type] = this._lists[type].concat(entries); + delete this._cached[type]; + return this; + } + get ok() { + return this.errors.length + this.conflicts.length === 0; + } + get errors() { + return this._lists["errors"]; + } + get conflicts() { + return this._lists["conflicts"]; + } + get skipped() { + return this._deduplicate("skipped"); + } + get resolved() { + return this._deduplicate("resolved"); + } + get created() { + return this._deduplicate("created"); + } + get updated() { + return this._deduplicate("updated"); + } + get deleted() { + return this._deduplicate("deleted"); + } + get published() { + return this._deduplicate("published"); + } + _deduplicate(list) { + if (!(list in this._cached)) { + // Deduplicate entries by id. 
If the values don't have `id` attribute, just + // keep all. + const recordsWithoutId = new Set(); + const recordsById = new Map(); + this._lists[list].forEach(record => { + if (!record.id) { + recordsWithoutId.add(record); + } + else { + recordsById.set(record.id, record); + } + }); + this._cached[list] = Array.from(recordsById.values()).concat(Array.from(recordsWithoutId)); + } + return this._cached[list]; + } + /** + * Reinitializes result entries for a given result type. + * + * @param {String} type The result type. + * @return {SyncResultObject} + */ + reset(type) { + this._lists[type] = []; + delete this._cached[type]; + return this; + } + toObject() { + // Only used in tests. + return { + ok: this.ok, + lastModified: this.lastModified, + errors: this.errors, + created: this.created, + updated: this.updated, + deleted: this.deleted, + skipped: this.skipped, + published: this.published, + conflicts: this.conflicts, + resolved: this.resolved, + }; + } + } + class ServerWasFlushedError extends Error { + constructor(clientTimestamp, serverTimestamp, message) { + super(message); + if (Error.captureStackTrace) { + Error.captureStackTrace(this, ServerWasFlushedError); + } + this.clientTimestamp = clientTimestamp; + this.serverTimestamp = serverTimestamp; + } + } + function createUUIDSchema() { + return { + generate() { + return uuid4(); + }, + validate(id) { + return typeof id == "string" && RE_RECORD_ID.test(id); + }, + }; + } + function markStatus(record, status) { + return Object.assign(Object.assign({}, record), { _status: status }); + } + function markDeleted(record) { + return markStatus(record, "deleted"); + } + function markSynced(record) { + return markStatus(record, "synced"); + } + /** + * Import a remote change into the local database. + * + * @param {IDBTransactionProxy} transaction The transaction handler. + * @param {Object} remote The remote change object to import. + * @param {Array<String>} localFields The list of fields that remain local. 
+ * @param {String} strategy The {@link Collection.strategy}. + * @return {Object} + */ + function importChange(transaction, remote, localFields, strategy) { + const local = transaction.get(remote.id); + if (!local) { + // Not found locally but remote change is marked as deleted; skip to + // avoid recreation. + if (remote.deleted) { + return { type: "skipped", data: remote }; + } + const synced = markSynced(remote); + transaction.create(synced); + return { type: "created", data: synced }; + } + // Apply remote changes on local record. + const synced = Object.assign(Object.assign({}, local), markSynced(remote)); + // With pull only, we don't need to compare records since we override them. + if (strategy === Collection.strategy.PULL_ONLY) { + if (remote.deleted) { + transaction.delete(remote.id); + return { type: "deleted", data: local }; + } + transaction.update(synced); + return { type: "updated", data: { old: local, new: synced } }; + } + // With other sync strategies, we detect conflicts, + // by comparing local and remote, ignoring local fields. + const isIdentical = recordsEqual(local, remote, localFields); + // Detect or ignore conflicts if record has also been modified locally. + if (local._status !== "synced") { + // Locally deleted, unsynced: scheduled for remote deletion. + if (local._status === "deleted") { + return { type: "skipped", data: local }; + } + if (isIdentical) { + // If records are identical, import anyway, so we bump the + // local last_modified value from the server and set record + // status to "synced". + transaction.update(synced); + return { type: "updated", data: { old: local, new: synced } }; + } + if (local.last_modified !== undefined && + local.last_modified === remote.last_modified) { + // If our local version has the same last_modified as the remote + // one, this represents an object that corresponds to a resolved + // conflict. Our local version represents the final output, so + // we keep that one. 
(No transaction operation to do.) + // But if our last_modified is undefined, + // that means we've created the same object locally as one on + // the server, which *must* be a conflict. + return { type: "void" }; + } + return { + type: "conflicts", + data: { type: "incoming", local: local, remote: remote }, + }; + } + // Local record was synced. + if (remote.deleted) { + transaction.delete(remote.id); + return { type: "deleted", data: local }; + } + // Import locally. + transaction.update(synced); + // if identical, simply exclude it from all SyncResultObject lists + const type = isIdentical ? "void" : "updated"; + return { type, data: { old: local, new: synced } }; + } + /** + * Abstracts a collection of records stored in the local database, providing + * CRUD operations and synchronization helpers. + */ + class Collection { + /** + * Constructor. + * + * Options: + * - `{BaseAdapter} adapter` The DB adapter (default: `IDB`) + * + * @param {String} bucket The bucket identifier. + * @param {String} name The collection name. + * @param {KintoBase} kinto The Kinto instance. + * @param {Object} options The options object. + */ + constructor(bucket, name, kinto, options = {}) { + this._bucket = bucket; + this._name = name; + this._lastModified = null; + const DBAdapter = options.adapter || IDB; + if (!DBAdapter) { + throw new Error("No adapter provided"); + } + const db = new DBAdapter(`${bucket}/${name}`, options.adapterOptions); + if (!(db instanceof BaseAdapter)) { + throw new Error("Unsupported adapter."); + } + // public properties + /** + * The db adapter instance + * @type {BaseAdapter} + */ + this.db = db; + /** + * The KintoBase instance. + * @type {KintoBase} + */ + this.kinto = kinto; + /** + * The event emitter instance. + * @type {EventEmitter} + */ + this.events = options.events; + /** + * The IdSchema instance. + * @type {Object} + */ + this.idSchema = this._validateIdSchema(options.idSchema); + /** + * The list of remote transformers. 
+ * @type {Array} + */ + this.remoteTransformers = this._validateRemoteTransformers(options.remoteTransformers); + /** + * The list of hooks. + * @type {Object} + */ + this.hooks = this._validateHooks(options.hooks); + /** + * The list of fields names that will remain local. + * @type {Array} + */ + this.localFields = options.localFields || []; + } + /** + * The HTTP client. + * @type {KintoClient} + */ + get api() { + return this.kinto.api; + } + /** + * The collection name. + * @type {String} + */ + get name() { + return this._name; + } + /** + * The bucket name. + * @type {String} + */ + get bucket() { + return this._bucket; + } + /** + * The last modified timestamp. + * @type {Number} + */ + get lastModified() { + return this._lastModified; + } + /** + * Synchronization strategies. Available strategies are: + * + * - `MANUAL`: Conflicts will be reported in a dedicated array. + * - `SERVER_WINS`: Conflicts are resolved using remote data. + * - `CLIENT_WINS`: Conflicts are resolved using local data. + * + * @type {Object} + */ + static get strategy() { + return { + CLIENT_WINS: "client_wins", + SERVER_WINS: "server_wins", + PULL_ONLY: "pull_only", + MANUAL: "manual", + }; + } + /** + * Validates an idSchema. + * + * @param {Object|undefined} idSchema + * @return {Object} + */ + _validateIdSchema(idSchema) { + if (typeof idSchema === "undefined") { + return createUUIDSchema(); + } + if (typeof idSchema !== "object") { + throw new Error("idSchema must be an object."); + } + else if (typeof idSchema.generate !== "function") { + throw new Error("idSchema must provide a generate function."); + } + else if (typeof idSchema.validate !== "function") { + throw new Error("idSchema must provide a validate function."); + } + return idSchema; + } + /** + * Validates a list of remote transformers. 
+ * + * @param {Array|undefined} remoteTransformers + * @return {Array} + */ + _validateRemoteTransformers(remoteTransformers) { + if (typeof remoteTransformers === "undefined") { + return []; + } + if (!Array.isArray(remoteTransformers)) { + throw new Error("remoteTransformers should be an array."); + } + return remoteTransformers.map(transformer => { + if (typeof transformer !== "object") { + throw new Error("A transformer must be an object."); + } + else if (typeof transformer.encode !== "function") { + throw new Error("A transformer must provide an encode function."); + } + else if (typeof transformer.decode !== "function") { + throw new Error("A transformer must provide a decode function."); + } + return transformer; + }); + } + /** + * Validate the passed hook is correct. + * + * @param {Array|undefined} hook. + * @return {Array} + **/ + _validateHook(hook) { + if (!Array.isArray(hook)) { + throw new Error("A hook definition should be an array of functions."); + } + return hook.map(fn => { + if (typeof fn !== "function") { + throw new Error("A hook definition should be an array of functions."); + } + return fn; + }); + } + /** + * Validates a list of hooks. + * + * @param {Object|undefined} hooks + * @return {Object} + */ + _validateHooks(hooks) { + if (typeof hooks === "undefined") { + return {}; + } + if (Array.isArray(hooks)) { + throw new Error("hooks should be an object, not an array."); + } + if (typeof hooks !== "object") { + throw new Error("hooks should be an object."); + } + const validatedHooks = {}; + for (const hook in hooks) { + if (!AVAILABLE_HOOKS.includes(hook)) { + throw new Error("The hook should be one of " + AVAILABLE_HOOKS.join(", ")); + } + validatedHooks[hook] = this._validateHook(hooks[hook]); + } + return validatedHooks; + } + /** + * Deletes every records in the current collection and marks the collection as + * never synced. 
+ * + * @return {Promise} + */ + async clear() { + await this.db.clear(); + await this.db.saveMetadata(null); + await this.db.saveLastModified(null); + return { data: [], permissions: {} }; + } + /** + * Encodes a record. + * + * @param {String} type Either "remote" or "local". + * @param {Object} record The record object to encode. + * @return {Promise} + */ + _encodeRecord(type, record) { + if (!this[`${type}Transformers`].length) { + return Promise.resolve(record); + } + return waterfall(this[`${type}Transformers`].map(transformer => { + return record => transformer.encode(record); + }), record); + } + /** + * Decodes a record. + * + * @param {String} type Either "remote" or "local". + * @param {Object} record The record object to decode. + * @return {Promise} + */ + _decodeRecord(type, record) { + if (!this[`${type}Transformers`].length) { + return Promise.resolve(record); + } + return waterfall(this[`${type}Transformers`].reverse().map(transformer => { + return record => transformer.decode(record); + }), record); + } + /** + * Adds a record to the local database, asserting that none + * already exist with this ID. + * + * Note: If either the `useRecordId` or `synced` options are true, then the + * record object must contain the id field to be validated. If none of these + * options are true, an id is generated using the current IdSchema; in this + * case, the record passed must not have an id. + * + * Options: + * - {Boolean} synced Sets record status to "synced" (default: `false`). + * - {Boolean} useRecordId Forces the `id` field from the record to be used, + * instead of one that is generated automatically + * (default: `false`). + * + * @param {Object} record + * @param {Object} options + * @return {Promise} + */ + create(record, options = { useRecordId: false, synced: false }) { + // Validate the record and its ID (if any), even though this + // validation is also done in the CollectionTransaction method, + // because we need to pass the ID to preloadIds. 
+ const reject = msg => Promise.reject(new Error(msg)); + if (typeof record !== "object") { + return reject("Record is not an object."); + } + if ((options.synced || options.useRecordId) && + !Object.prototype.hasOwnProperty.call(record, "id")) { + return reject("Missing required Id; synced and useRecordId options require one"); + } + if (!options.synced && + !options.useRecordId && + Object.prototype.hasOwnProperty.call(record, "id")) { + return reject("Extraneous Id; can't create a record having one set."); + } + const newRecord = Object.assign(Object.assign({}, record), { id: options.synced || options.useRecordId + ? record.id + : this.idSchema.generate(record), _status: options.synced ? "synced" : "created" }); + if (!this.idSchema.validate(newRecord.id)) { + return reject(`Invalid Id: ${newRecord.id}`); + } + return this.execute(txn => txn.create(newRecord), { + preloadIds: [newRecord.id], + }).catch(err => { + if (options.useRecordId) { + throw new Error("Couldn't create record. It may have been virtually deleted."); + } + throw err; + }); + } + /** + * Like {@link CollectionTransaction#update}, but wrapped in its own transaction. + * + * Options: + * - {Boolean} synced: Sets record status to "synced" (default: false) + * - {Boolean} patch: Extends the existing record instead of overwriting it + * (default: false) + * + * @param {Object} record + * @param {Object} options + * @return {Promise} + */ + update(record, options = { synced: false, patch: false }) { + // Validate the record and its ID, even though this validation is + // also done in the CollectionTransaction method, because we need + // to pass the ID to preloadIds. 
+ if (typeof record !== "object") { + return Promise.reject(new Error("Record is not an object.")); + } + if (!Object.prototype.hasOwnProperty.call(record, "id")) { + return Promise.reject(new Error("Cannot update a record missing id.")); + } + if (!this.idSchema.validate(record.id)) { + return Promise.reject(new Error(`Invalid Id: ${record.id}`)); + } + return this.execute(txn => txn.update(record, options), { + preloadIds: [record.id], + }); + } + /** + * Like {@link CollectionTransaction#upsert}, but wrapped in its own transaction. + * + * @param {Object} record + * @return {Promise} + */ + upsert(record) { + // Validate the record and its ID, even though this validation is + // also done in the CollectionTransaction method, because we need + // to pass the ID to preloadIds. + if (typeof record !== "object") { + return Promise.reject(new Error("Record is not an object.")); + } + if (!Object.prototype.hasOwnProperty.call(record, "id")) { + return Promise.reject(new Error("Cannot update a record missing id.")); + } + if (!this.idSchema.validate(record.id)) { + return Promise.reject(new Error(`Invalid Id: ${record.id}`)); + } + return this.execute(txn => txn.upsert(record), { preloadIds: [record.id] }); + } + /** + * Like {@link CollectionTransaction#get}, but wrapped in its own transaction. + * + * Options: + * - {Boolean} includeDeleted: Include virtually deleted records. + * + * @param {String} id + * @param {Object} options + * @return {Promise} + */ + get(id, options = { includeDeleted: false }) { + return this.execute(txn => txn.get(id, options), { preloadIds: [id] }); + } + /** + * Like {@link CollectionTransaction#getAny}, but wrapped in its own transaction. + * + * @param {String} id + * @return {Promise} + */ + getAny(id) { + return this.execute(txn => txn.getAny(id), { preloadIds: [id] }); + } + /** + * Same as {@link Collection#delete}, but wrapped in its own transaction. 
+ * + * Options: + * - {Boolean} virtual: When set to `true`, doesn't actually delete the record, + * update its `_status` attribute to `deleted` instead (default: true) + * + * @param {String} id The record's Id. + * @param {Object} options The options object. + * @return {Promise} + */ + delete(id, options = { virtual: true }) { + return this.execute(transaction => { + return transaction.delete(id, options); + }, { preloadIds: [id] }); + } + /** + * Same as {@link Collection#deleteAll}, but wrapped in its own transaction, execulding the parameter. + * + * @return {Promise} + */ + async deleteAll() { + const { data } = await this.list({}, { includeDeleted: false }); + const recordIds = data.map(record => record.id); + return this.execute(transaction => { + return transaction.deleteAll(recordIds); + }, { preloadIds: recordIds }); + } + /** + * The same as {@link CollectionTransaction#deleteAny}, but wrapped + * in its own transaction. + * + * @param {String} id The record's Id. + * @return {Promise} + */ + deleteAny(id) { + return this.execute(txn => txn.deleteAny(id), { preloadIds: [id] }); + } + /** + * Lists records from the local database. + * + * Params: + * - {Object} filters Filter the results (default: `{}`). + * - {String} order The order to apply (default: `-last_modified`). + * + * Options: + * - {Boolean} includeDeleted: Include virtually deleted records. + * + * @param {Object} params The filters and order to apply to the results. + * @param {Object} options The options object. + * @return {Promise} + */ + async list(params = {}, options = { includeDeleted: false }) { + params = Object.assign({ order: "-last_modified", filters: {} }, params); + const results = await this.db.list(params); + let data = results; + if (!options.includeDeleted) { + data = results.filter(record => record._status !== "deleted"); + } + return { data, permissions: {} }; + } + /** + * Imports remote changes into the local database. 
+ * This method is in charge of detecting the conflicts, and resolve them + * according to the specified strategy. + * @param {SyncResultObject} syncResultObject The sync result object. + * @param {Array} decodedChanges The list of changes to import in the local database. + * @param {String} strategy The {@link Collection.strategy} (default: MANUAL) + * @return {Promise} + */ + async importChanges(syncResultObject, decodedChanges, strategy = Collection.strategy.MANUAL) { + // Retrieve records matching change ids. + try { + for (let i = 0; i < decodedChanges.length; i += IMPORT_CHUNK_SIZE) { + const slice = decodedChanges.slice(i, i + IMPORT_CHUNK_SIZE); + const { imports, resolved } = await this.db.execute(transaction => { + const imports = slice.map(remote => { + // Store remote change into local database. + return importChange(transaction, remote, this.localFields, strategy); + }); + const conflicts = imports + .filter(i => i.type === "conflicts") + .map(i => i.data); + const resolved = this._handleConflicts(transaction, conflicts, strategy); + return { imports, resolved }; + }, { preload: slice.map(record => record.id) }); + // Lists of created/updated/deleted records + imports.forEach(({ type, data }) => syncResultObject.add(type, data)); + // Automatically resolved conflicts (if not manual) + if (resolved.length > 0) { + syncResultObject.reset("conflicts").add("resolved", resolved); + } + } + } + catch (err) { + const data = { + type: "incoming", + message: err.message, + stack: err.stack, + }; + // XXX one error of the whole transaction instead of per atomic op + syncResultObject.add("errors", data); + } + return syncResultObject; + } + /** + * Imports the responses of pushed changes into the local database. + * Basically it stores the timestamp assigned by the server into the local + * database. + * @param {SyncResultObject} syncResultObject The sync result object. + * @param {Array} toApplyLocally The list of changes to import in the local database. 
+ * @param {Array} conflicts The list of conflicts that have to be resolved. + * @param {String} strategy The {@link Collection.strategy}. + * @return {Promise} + */ + async _applyPushedResults(syncResultObject, toApplyLocally, conflicts, strategy = Collection.strategy.MANUAL) { + const toDeleteLocally = toApplyLocally.filter(r => r.deleted); + const toUpdateLocally = toApplyLocally.filter(r => !r.deleted); + const { published, resolved } = await this.db.execute(transaction => { + const updated = toUpdateLocally.map(record => { + const synced = markSynced(record); + transaction.update(synced); + return synced; + }); + const deleted = toDeleteLocally.map(record => { + transaction.delete(record.id); + // Amend result data with the deleted attribute set + return { id: record.id, deleted: true }; + }); + const published = updated.concat(deleted); + // Handle conflicts, if any + const resolved = this._handleConflicts(transaction, conflicts, strategy); + return { published, resolved }; + }); + syncResultObject.add("published", published); + if (resolved.length > 0) { + syncResultObject + .reset("conflicts") + .reset("resolved") + .add("resolved", resolved); + } + return syncResultObject; + } + /** + * Handles synchronization conflicts according to specified strategy. + * + * @param {SyncResultObject} result The sync result object. + * @param {String} strategy The {@link Collection.strategy}. + * @return {Promise<Array<Object>>} The resolved conflicts, as an + * array of {accepted, rejected} objects + */ + _handleConflicts(transaction, conflicts, strategy) { + if (strategy === Collection.strategy.MANUAL) { + return []; + } + return conflicts.map(conflict => { + const resolution = strategy === Collection.strategy.CLIENT_WINS + ? conflict.local + : conflict.remote; + const rejected = strategy === Collection.strategy.CLIENT_WINS + ? conflict.remote + : conflict.local; + let accepted, status, id; + if (resolution === null) { + // We "resolved" with the server-side deletion. 
Delete locally. + // This only happens during SERVER_WINS because the local + // version of a record can never be null. + // We can get "null" from the remote side if we got a conflict + // and there is no remote version available; see kinto-http.js + // batch.js:aggregate. + transaction.delete(conflict.local.id); + accepted = null; + // The record was deleted, but that status is "synced" with + // the server, so we don't need to push the change. + status = "synced"; + id = conflict.local.id; + } + else { + const updated = this._resolveRaw(conflict, resolution); + transaction.update(updated); + accepted = updated; + status = updated._status; + id = updated.id; + } + return { rejected, accepted, id, _status: status }; + }); + } + /** + * Execute a bunch of operations in a transaction. + * + * This transaction should be atomic -- either all of its operations + * will succeed, or none will. + * + * The argument to this function is itself a function which will be + * called with a {@link CollectionTransaction}. Collection methods + * are available on this transaction, but instead of returning + * promises, they are synchronous. execute() returns a Promise whose + * value will be the return value of the provided function. + * + * Most operations will require access to the record itself, which + * must be preloaded by passing its ID in the preloadIds option. + * + * Options: + * - {Array} preloadIds: list of IDs to fetch at the beginning of + * the transaction + * + * @return {Promise} Resolves with the result of the given function + * when the transaction commits. 
+ */ + execute(doOperations, { preloadIds = [] } = {}) { + for (const id of preloadIds) { + if (!this.idSchema.validate(id)) { + return Promise.reject(Error(`Invalid Id: ${id}`)); + } + } + return this.db.execute(transaction => { + const txn = new CollectionTransaction(this, transaction); + const result = doOperations(txn); + txn.emitEvents(); + return result; + }, { preload: preloadIds }); + } + /** + * Resets the local records as if they were never synced; existing records are + * marked as newly created, deleted records are dropped. + * + * A next call to {@link Collection.sync} will thus republish the whole + * content of the local collection to the server. + * + * @return {Promise} Resolves with the number of processed records. + */ + async resetSyncStatus() { + const unsynced = await this.list({ filters: { _status: ["deleted", "synced"] }, order: "" }, { includeDeleted: true }); + await this.db.execute(transaction => { + unsynced.data.forEach(record => { + if (record._status === "deleted") { + // Garbage collect deleted records. + transaction.delete(record.id); + } + else { + // Records that were synced become «created». + transaction.update(Object.assign(Object.assign({}, record), { last_modified: undefined, _status: "created" })); + } + }); + }); + this._lastModified = null; + await this.db.saveLastModified(null); + return unsynced.data.length; + } + /** + * Returns an object containing two lists: + * + * - `toDelete`: unsynced deleted records we can safely delete; + * - `toSync`: local updates to send to the server. 
+ * + * @return {Promise} + */ + async gatherLocalChanges() { + const unsynced = await this.list({ + filters: { _status: ["created", "updated"] }, + order: "", + }); + const deleted = await this.list({ filters: { _status: "deleted" }, order: "" }, { includeDeleted: true }); + return await Promise.all(unsynced.data + .concat(deleted.data) + .map(this._encodeRecord.bind(this, "remote"))); + } + /** + * Fetch remote changes, import them to the local database, and handle + * conflicts according to `options.strategy`. Then, updates the passed + * {@link SyncResultObject} with import results. + * + * Options: + * - {String} strategy: The selected sync strategy. + * - {String} expectedTimestamp: A timestamp to use as a "cache busting" query parameter. + * - {Array<String>} exclude: A list of record ids to exclude from pull. + * - {Object} headers: The HTTP headers to use in the request. + * - {int} retry: The number of retries to do if the HTTP request fails. + * - {int} lastModified: The timestamp to use in `?_since` query. + * + * @param {KintoClient.Collection} client Kinto client Collection instance. + * @param {SyncResultObject} syncResultObject The sync result object. + * @param {Object} options The options object. + * @return {Promise} + */ + async pullChanges(client, syncResultObject, options = {}) { + if (!syncResultObject.ok) { + return syncResultObject; + } + const since = this.lastModified + ? this.lastModified + : await this.db.getLastModified(); + options = Object.assign({ strategy: Collection.strategy.MANUAL, lastModified: since, headers: {} }, options); + // Optionally ignore some records when pulling for changes. + // (avoid redownloading our own changes on last step of #sync()) + let filters; + if (options.exclude) { + // Limit the list of excluded records to the first 50 records in order + // to remain under de-facto URL size limit (~2000 chars). 
+ // http://stackoverflow.com/questions/417142/what-is-the-maximum-length-of-a-url-in-different-browsers/417184#417184 + const exclude_id = options.exclude + .slice(0, 50) + .map(r => r.id) + .join(","); + filters = { exclude_id }; + } + if (options.expectedTimestamp) { + filters = Object.assign(Object.assign({}, filters), { _expected: options.expectedTimestamp }); + } + // First fetch remote changes from the server + const { data, last_modified } = await client.listRecords({ + // Since should be ETag (see https://github.com/Kinto/kinto.js/issues/356) + since: options.lastModified ? `${options.lastModified}` : undefined, + headers: options.headers, + retry: options.retry, + // Fetch every page by default (FIXME: option to limit pages, see #277) + pages: Infinity, + filters, + }); + // last_modified is the ETag header value (string). + // For retro-compatibility with first kinto.js versions + // parse it to integer. + const unquoted = last_modified ? parseInt(last_modified, 10) : undefined; + // Check if server was flushed. + // This is relevant for the Kinto demo server + // (and thus for many new comers). + const localSynced = options.lastModified; + const serverChanged = unquoted > options.lastModified; + const emptyCollection = data.length === 0; + if (!options.exclude && localSynced && serverChanged && emptyCollection) { + const e = new ServerWasFlushedError(localSynced, unquoted, "Server has been flushed. Client Side Timestamp: " + + localSynced + + " Server Side Timestamp: " + + unquoted); + throw e; + } + // Atomic updates are not sensible here because unquoted is not + // computed as a function of syncResultObject.lastModified. + // eslint-disable-next-line require-atomic-updates + syncResultObject.lastModified = unquoted; + // Decode incoming changes. + const decodedChanges = await Promise.all(data.map(change => { + return this._decodeRecord("remote", change); + })); + // Hook receives decoded records. 
+ const payload = { lastModified: unquoted, changes: decodedChanges }; + const afterHooks = await this.applyHook("incoming-changes", payload); + // No change, nothing to import. + if (afterHooks.changes.length > 0) { + // Reflect these changes locally + await this.importChanges(syncResultObject, afterHooks.changes, options.strategy); + } + return syncResultObject; + } + applyHook(hookName, payload) { + if (typeof this.hooks[hookName] == "undefined") { + return Promise.resolve(payload); + } + return waterfall(this.hooks[hookName].map(hook => { + return record => { + const result = hook(payload, this); + const resultThenable = result && typeof result.then === "function"; + const resultChanges = result && Object.prototype.hasOwnProperty.call(result, "changes"); + if (!(resultThenable || resultChanges)) { + throw new Error(`Invalid return value for hook: ${JSON.stringify(result)} has no 'then()' or 'changes' properties`); + } + return result; + }; + }), payload); + } + /** + * Publish local changes to the remote server and updates the passed + * {@link SyncResultObject} with publication results. + * + * Options: + * - {String} strategy: The selected sync strategy. + * - {Object} headers: The HTTP headers to use in the request. + * - {int} retry: The number of retries to do if the HTTP request fails. + * + * @param {KintoClient.Collection} client Kinto client Collection instance. + * @param {SyncResultObject} syncResultObject The sync result object. + * @param {Object} changes The change object. + * @param {Array} changes.toDelete The list of records to delete. + * @param {Array} changes.toSync The list of records to create/update. + * @param {Object} options The options object. 
+ * @return {Promise} + */ + async pushChanges(client, changes, syncResultObject, options = {}) { + if (!syncResultObject.ok) { + return syncResultObject; + } + const safe = !options.strategy || options.strategy !== Collection.CLIENT_WINS; + const toDelete = changes.filter(r => r._status == "deleted"); + const toSync = changes.filter(r => r._status != "deleted"); + // Perform a batch request with every changes. + const synced = await client.batch(batch => { + toDelete.forEach(r => { + // never published locally deleted records should not be pusblished + if (r.last_modified) { + batch.deleteRecord(r); + } + }); + toSync.forEach(r => { + // Clean local fields (like _status) before sending to server. + const published = this.cleanLocalFields(r); + if (r._status === "created") { + batch.createRecord(published); + } + else { + batch.updateRecord(published); + } + }); + }, { + headers: options.headers, + retry: options.retry, + safe, + aggregate: true, + }); + // Store outgoing errors into sync result object + syncResultObject.add("errors", synced.errors.map(e => (Object.assign(Object.assign({}, e), { type: "outgoing" })))); + // Store outgoing conflicts into sync result object + const conflicts = []; + for (const { type, local, remote } of synced.conflicts) { + // Note: we ensure that local data are actually available, as they may + // be missing in the case of a published deletion. + const safeLocal = (local && local.data) || { id: remote.id }; + const realLocal = await this._decodeRecord("remote", safeLocal); + // We can get "null" from the remote side if we got a conflict + // and there is no remote version available; see kinto-http.js + // batch.js:aggregate. 
+ const realRemote = remote && (await this._decodeRecord("remote", remote)); + const conflict = { type, local: realLocal, remote: realRemote }; + conflicts.push(conflict); + } + syncResultObject.add("conflicts", conflicts); + // Records that must be deleted are either deletions that were pushed + // to server (published) or deleted records that were never pushed (skipped). + const missingRemotely = synced.skipped.map(r => (Object.assign(Object.assign({}, r), { deleted: true }))); + // For created and updated records, the last_modified coming from server + // will be stored locally. + // Reflect publication results locally using the response from + // the batch request. + const published = synced.published.map(c => c.data); + const toApplyLocally = published.concat(missingRemotely); + // Apply the decode transformers, if any + const decoded = await Promise.all(toApplyLocally.map(record => { + return this._decodeRecord("remote", record); + })); + // We have to update the local records with the responses of the server + // (eg. last_modified values etc.). + if (decoded.length > 0 || conflicts.length > 0) { + await this._applyPushedResults(syncResultObject, decoded, conflicts, options.strategy); + } + return syncResultObject; + } + /** + * Return a copy of the specified record without the local fields. + * + * @param {Object} record A record with potential local fields. + * @return {Object} + */ + cleanLocalFields(record) { + const localKeys = RECORD_FIELDS_TO_CLEAN.concat(this.localFields); + return omitKeys(record, localKeys); + } + /** + * Resolves a conflict, updating local record according to proposed + * resolution — keeping remote record `last_modified` value as a reference for + * further batch sending. + * + * @param {Object} conflict The conflict object. + * @param {Object} resolution The proposed record. 
+ * @return {Promise} + */ + resolve(conflict, resolution) { + return this.db.execute(transaction => { + const updated = this._resolveRaw(conflict, resolution); + transaction.update(updated); + return { data: updated, permissions: {} }; + }); + } + /** + * @private + */ + _resolveRaw(conflict, resolution) { + const resolved = Object.assign(Object.assign({}, resolution), { + // Ensure local record has the latest authoritative timestamp + last_modified: conflict.remote && conflict.remote.last_modified }); + // If the resolution object is strictly equal to the + // remote record, then we can mark it as synced locally. + // Otherwise, mark it as updated (so that the resolution is pushed). + const synced = deepEqual(resolved, conflict.remote); + return markStatus(resolved, synced ? "synced" : "updated"); + } + /** + * Synchronize remote and local data. The promise will resolve with a + * {@link SyncResultObject}, though will reject: + * + * - if the server is currently backed off; + * - if the server has been detected flushed. + * + * Options: + * - {Object} headers: HTTP headers to attach to outgoing requests. + * - {String} expectedTimestamp: A timestamp to use as a "cache busting" query parameter. + * - {Number} retry: Number of retries when server fails to process the request (default: 1). + * - {Collection.strategy} strategy: See {@link Collection.strategy}. + * - {Boolean} ignoreBackoff: Force synchronization even if server is currently + * backed off. + * - {String} bucket: The remove bucket id to use (default: null) + * - {String} collection: The remove collection id to use (default: null) + * - {String} remote The remote Kinto server endpoint to use (default: null). + * + * @param {Object} options Options. + * @return {Promise} + * @throws {Error} If an invalid remote option is passed. 
+ */ + async sync(options = { + strategy: Collection.strategy.MANUAL, + headers: {}, + retry: 1, + ignoreBackoff: false, + bucket: null, + collection: null, + remote: null, + expectedTimestamp: null, + }) { + options = Object.assign(Object.assign({}, options), { bucket: options.bucket || this.bucket, collection: options.collection || this.name }); + const previousRemote = this.api.remote; + if (options.remote) { + // Note: setting the remote ensures it's valid, throws when invalid. + this.api.remote = options.remote; + } + if (!options.ignoreBackoff && this.api.backoff > 0) { + const seconds = Math.ceil(this.api.backoff / 1000); + return Promise.reject(new Error(`Server is asking clients to back off; retry in ${seconds}s or use the ignoreBackoff option.`)); + } + const client = this.api + .bucket(options.bucket) + .collection(options.collection); + const result = new SyncResultObject(); + try { + // Fetch collection metadata. + await this.pullMetadata(client, options); + // Fetch last changes from the server. + await this.pullChanges(client, result, options); + const { lastModified } = result; + if (options.strategy != Collection.strategy.PULL_ONLY) { + // Fetch local changes + const toSync = await this.gatherLocalChanges(); + // Publish local changes and pull local resolutions + await this.pushChanges(client, toSync, result, options); + // Publish local resolution of push conflicts to server (on CLIENT_WINS) + const resolvedUnsynced = result.resolved.filter(r => r._status !== "synced"); + if (resolvedUnsynced.length > 0) { + const resolvedEncoded = await Promise.all(resolvedUnsynced.map(resolution => { + let record = resolution.accepted; + if (record === null) { + record = { id: resolution.id, _status: resolution._status }; + } + return this._encodeRecord("remote", record); + })); + await this.pushChanges(client, resolvedEncoded, result, options); + } + // Perform a last pull to catch changes that occured after the last pull, + // while local changes were pushed. 
Do not do it nothing was pushed. + if (result.published.length > 0) { + // Avoid redownloading our own changes during the last pull. + const pullOpts = Object.assign(Object.assign({}, options), { lastModified, exclude: result.published }); + await this.pullChanges(client, result, pullOpts); + } + } + // Don't persist lastModified value if any conflict or error occured + if (result.ok) { + // No conflict occured, persist collection's lastModified value + this._lastModified = await this.db.saveLastModified(result.lastModified); + } + } + catch (e) { + this.events.emit("sync:error", Object.assign(Object.assign({}, options), { error: e })); + throw e; + } + finally { + // Ensure API default remote is reverted if a custom one's been used + this.api.remote = previousRemote; + } + this.events.emit("sync:success", Object.assign(Object.assign({}, options), { result })); + return result; + } + /** + * Load a list of records already synced with the remote server. + * + * The local records which are unsynced or whose timestamp is either missing + * or superior to those being loaded will be ignored. + * + * @deprecated Use {@link importBulk} instead. + * @param {Array} records The previously exported list of records to load. + * @return {Promise} with the effectively imported records. + */ + async loadDump(records) { + return this.importBulk(records); + } + /** + * Load a list of records already synced with the remote server. + * + * The local records which are unsynced or whose timestamp is either missing + * or superior to those being loaded will be ignored. + * + * @param {Array} records The previously exported list of records to load. + * @return {Promise} with the effectively imported records. 
+ */ + async importBulk(records) { + if (!Array.isArray(records)) { + throw new Error("Records is not an array."); + } + for (const record of records) { + if (!Object.prototype.hasOwnProperty.call(record, "id") || + !this.idSchema.validate(record.id)) { + throw new Error("Record has invalid ID: " + JSON.stringify(record)); + } + if (!record.last_modified) { + throw new Error("Record has no last_modified value: " + JSON.stringify(record)); + } + } + // Fetch all existing records from local database, + // and skip those who are newer or not marked as synced. + // XXX filter by status / ids in records + const { data } = await this.list({}, { includeDeleted: true }); + const existingById = data.reduce((acc, record) => { + acc[record.id] = record; + return acc; + }, {}); + const newRecords = records.filter(record => { + const localRecord = existingById[record.id]; + const shouldKeep = + // No local record with this id. + localRecord === undefined || + // Or local record is synced + (localRecord._status === "synced" && + // And was synced from server + localRecord.last_modified !== undefined && + // And is older than imported one. + record.last_modified > localRecord.last_modified); + return shouldKeep; + }); + return await this.db.importBulk(newRecords.map(markSynced)); + } + async pullMetadata(client, options = {}) { + const { expectedTimestamp, headers } = options; + const query = expectedTimestamp + ? { query: { _expected: expectedTimestamp } } + : undefined; + const metadata = await client.getData(Object.assign(Object.assign({}, query), { headers })); + return this.db.saveMetadata(metadata); + } + async metadata() { + return this.db.getMetadata(); + } + } + /** + * A Collection-oriented wrapper for an adapter's transaction. + * + * This defines the high-level functions available on a collection. + * The collection itself offers functions of the same name. These will + * perform just one operation in its own transaction. 
+ */ + class CollectionTransaction { + constructor(collection, adapterTransaction) { + this.collection = collection; + this.adapterTransaction = adapterTransaction; + this._events = []; + } + _queueEvent(action, payload) { + this._events.push({ action, payload }); + } + /** + * Emit queued events, to be called once every transaction operations have + * been executed successfully. + */ + emitEvents() { + for (const { action, payload } of this._events) { + this.collection.events.emit(action, payload); + } + if (this._events.length > 0) { + const targets = this._events.map(({ action, payload }) => (Object.assign({ action }, payload))); + this.collection.events.emit("change", { targets }); + } + this._events = []; + } + /** + * Retrieve a record by its id from the local database, or + * undefined if none exists. + * + * This will also return virtually deleted records. + * + * @param {String} id + * @return {Object} + */ + getAny(id) { + const record = this.adapterTransaction.get(id); + return { data: record, permissions: {} }; + } + /** + * Retrieve a record by its id from the local database. + * + * Options: + * - {Boolean} includeDeleted: Include virtually deleted records. + * + * @param {String} id + * @param {Object} options + * @return {Object} + */ + get(id, options = { includeDeleted: false }) { + const res = this.getAny(id); + if (!res.data || + (!options.includeDeleted && res.data._status === "deleted")) { + throw new Error(`Record with id=${id} not found.`); + } + return res; + } + /** + * Deletes a record from the local database. + * + * Options: + * - {Boolean} virtual: When set to `true`, doesn't actually delete the record, + * update its `_status` attribute to `deleted` instead (default: true) + * + * @param {String} id The record's Id. + * @param {Object} options The options object. + * @return {Object} + */ + delete(id, options = { virtual: true }) { + // Ensure the record actually exists. 
+ const existing = this.adapterTransaction.get(id); + const alreadyDeleted = existing && existing._status == "deleted"; + if (!existing || (alreadyDeleted && options.virtual)) { + throw new Error(`Record with id=${id} not found.`); + } + // Virtual updates status. + if (options.virtual) { + this.adapterTransaction.update(markDeleted(existing)); + } + else { + // Delete for real. + this.adapterTransaction.delete(id); + } + this._queueEvent("delete", { data: existing }); + return { data: existing, permissions: {} }; + } + /** + * Soft delete all records from the local database. + * + * @param {Array} ids Array of non-deleted Record Ids. + * @return {Object} + */ + deleteAll(ids) { + const existingRecords = []; + ids.forEach(id => { + existingRecords.push(this.adapterTransaction.get(id)); + this.delete(id); + }); + this._queueEvent("deleteAll", { data: existingRecords }); + return { data: existingRecords, permissions: {} }; + } + /** + * Deletes a record from the local database, if any exists. + * Otherwise, do nothing. + * + * @param {String} id The record's Id. + * @return {Object} + */ + deleteAny(id) { + const existing = this.adapterTransaction.get(id); + if (existing) { + this.adapterTransaction.update(markDeleted(existing)); + this._queueEvent("delete", { data: existing }); + } + return { data: Object.assign({ id }, existing), deleted: !!existing, permissions: {} }; + } + /** + * Adds a record to the local database, asserting that none + * already exist with this ID. 
+ * + * @param {Object} record, which must contain an ID + * @return {Object} + */ + create(record) { + if (typeof record !== "object") { + throw new Error("Record is not an object."); + } + if (!Object.prototype.hasOwnProperty.call(record, "id")) { + throw new Error("Cannot create a record missing id"); + } + if (!this.collection.idSchema.validate(record.id)) { + throw new Error(`Invalid Id: ${record.id}`); + } + this.adapterTransaction.create(record); + this._queueEvent("create", { data: record }); + return { data: record, permissions: {} }; + } + /** + * Updates a record from the local database. + * + * Options: + * - {Boolean} synced: Sets record status to "synced" (default: false) + * - {Boolean} patch: Extends the existing record instead of overwriting it + * (default: false) + * + * @param {Object} record + * @param {Object} options + * @return {Object} + */ + update(record, options = { synced: false, patch: false }) { + if (typeof record !== "object") { + throw new Error("Record is not an object."); + } + if (!Object.prototype.hasOwnProperty.call(record, "id")) { + throw new Error("Cannot update a record missing id."); + } + if (!this.collection.idSchema.validate(record.id)) { + throw new Error(`Invalid Id: ${record.id}`); + } + const oldRecord = this.adapterTransaction.get(record.id); + if (!oldRecord) { + throw new Error(`Record with id=${record.id} not found.`); + } + const newRecord = options.patch ? Object.assign(Object.assign({}, oldRecord), record) : record; + const updated = this._updateRaw(oldRecord, newRecord, options); + this.adapterTransaction.update(updated); + this._queueEvent("update", { data: updated, oldRecord }); + return { data: updated, oldRecord, permissions: {} }; + } + /** + * Lower-level primitive for updating a record while respecting + * _status and last_modified. 
+ *
+ * @param {Object} oldRecord: the record retrieved from the DB
+ * @param {Object} newRecord: the record to replace it with
+ * @return {Object}
+ */
+ _updateRaw(oldRecord, newRecord, { synced = false } = {}) {
+ const updated = Object.assign({}, newRecord);
+ // Make sure to never lose the existing timestamp.
+ if (oldRecord && oldRecord.last_modified && !updated.last_modified) {
+ updated.last_modified = oldRecord.last_modified;
+ }
+ // If only local fields have changed, then keep record as synced.
+ // If status is created, keep record as created.
+ // If status is deleted, mark as updated.
+ const isIdentical = oldRecord &&
+ recordsEqual(oldRecord, updated, this.collection.localFields);
+ const keepSynced = isIdentical && oldRecord._status == "synced";
+ const neverSynced = !oldRecord || (oldRecord && oldRecord._status == "created");
+ const newStatus = keepSynced || synced ? "synced" : neverSynced ? "created" : "updated";
+ return markStatus(updated, newStatus);
+ }
+ /**
+ * Upsert a record into the local database.
+ *
+ * This record must have an ID.
+ *
+ * If a record with this ID already exists, it will be replaced.
+ * Otherwise, this record will be inserted. 
+ * + * @param {Object} record + * @return {Object} + */ + upsert(record) { + if (typeof record !== "object") { + throw new Error("Record is not an object."); + } + if (!Object.prototype.hasOwnProperty.call(record, "id")) { + throw new Error("Cannot update a record missing id."); + } + if (!this.collection.idSchema.validate(record.id)) { + throw new Error(`Invalid Id: ${record.id}`); + } + let oldRecord = this.adapterTransaction.get(record.id); + const updated = this._updateRaw(oldRecord, record); + this.adapterTransaction.update(updated); + // Don't return deleted records -- pretend they are gone + if (oldRecord && oldRecord._status == "deleted") { + oldRecord = undefined; + } + if (oldRecord) { + this._queueEvent("update", { data: updated, oldRecord }); + } + else { + this._queueEvent("create", { data: updated }); + } + return { data: updated, oldRecord, permissions: {} }; + } + } + + const DEFAULT_BUCKET_NAME = "default"; + const DEFAULT_REMOTE = "http://localhost:8888/v1"; + const DEFAULT_RETRY = 1; + /** + * KintoBase class. + */ + class KintoBase { + /** + * Provides a public access to the base adapter class. Users can create a + * custom DB adapter by extending {@link BaseAdapter}. + * + * @type {Object} + */ + static get adapters() { + return { + BaseAdapter: BaseAdapter, + }; + } + /** + * Synchronization strategies. Available strategies are: + * + * - `MANUAL`: Conflicts will be reported in a dedicated array. + * - `SERVER_WINS`: Conflicts are resolved using remote data. + * - `CLIENT_WINS`: Conflicts are resolved using local data. + * + * @type {Object} + */ + static get syncStrategy() { + return Collection.strategy; + } + /** + * Constructor. + * + * Options: + * - `{String}` `remote` The server URL to use. + * - `{String}` `bucket` The collection bucket name. + * - `{EventEmitter}` `events` Events handler. + * - `{BaseAdapter}` `adapter` The base DB adapter class. + * - `{Object}` `adapterOptions` Options given to the adapter. 
+ * - `{Object}` `headers` The HTTP headers to use. + * - `{Object}` `retry` Number of retries when the server fails to process the request (default: `1`) + * - `{String}` `requestMode` The HTTP CORS mode to use. + * - `{Number}` `timeout` The requests timeout in ms (default: `5000`). + * + * @param {Object} options The options object. + */ + constructor(options = {}) { + const defaults = { + bucket: DEFAULT_BUCKET_NAME, + remote: DEFAULT_REMOTE, + retry: DEFAULT_RETRY, + }; + this._options = Object.assign(Object.assign({}, defaults), options); + if (!this._options.adapter) { + throw new Error("No adapter provided"); + } + this._api = null; + /** + * The event emitter instance. + * @type {EventEmitter} + */ + this.events = this._options.events; + } + /** + * The kinto HTTP client instance. + * @type {KintoClient} + */ + get api() { + const { events, headers, remote, requestMode, retry, timeout, } = this._options; + if (!this._api) { + this._api = new this.ApiClass(remote, { + events, + headers, + requestMode, + retry, + timeout, + }); + } + return this._api; + } + /** + * Creates a {@link Collection} instance. The second (optional) parameter + * will set collection-level options like e.g. `remoteTransformers`. + * + * @param {String} collName The collection name. + * @param {Object} [options={}] Extra options or override client's options. 
+ * @param {Object} [options.idSchema] IdSchema instance (default: UUID) + * @param {Object} [options.remoteTransformers] Array<RemoteTransformer> (default: `[]`]) + * @param {Object} [options.hooks] Array<Hook> (default: `[]`]) + * @param {Object} [options.localFields] Array<Field> (default: `[]`]) + * @return {Collection} + */ + collection(collName, options = {}) { + if (!collName) { + throw new Error("missing collection name"); + } + const { bucket, events, adapter, adapterOptions } = Object.assign(Object.assign({}, this._options), options); + const { idSchema, remoteTransformers, hooks, localFields } = options; + return new Collection(bucket, collName, this, { + events, + adapter, + adapterOptions, + idSchema, + remoteTransformers, + hooks, + localFields, + }); + } + } + + /* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + const { setTimeout, clearTimeout } = ChromeUtils.importESModule("resource://gre/modules/Timer.sys.mjs"); + const { XPCOMUtils } = ChromeUtils.importESModule("resource://gre/modules/XPCOMUtils.sys.mjs"); + XPCOMUtils.defineLazyGlobalGetters(global, ["fetch", "indexedDB"]); + ChromeUtils.defineESModuleGetters(global, { + EventEmitter: "resource://gre/modules/EventEmitter.sys.mjs", + // Use standalone kinto-http module landed in FFx. 
+ KintoHttpClient: "resource://services-common/kinto-http-client.sys.mjs" + }); + ChromeUtils.defineLazyGetter(global, "generateUUID", () => { + const { generateUUID } = Cc["@mozilla.org/uuid-generator;1"].getService(Ci.nsIUUIDGenerator); + return generateUUID; + }); + class Kinto extends KintoBase { + static get adapters() { + return { + BaseAdapter, + IDB, + }; + } + get ApiClass() { + return KintoHttpClient; + } + constructor(options = {}) { + const events = {}; + EventEmitter.decorate(events); + const defaults = { + adapter: IDB, + events, + }; + super(Object.assign(Object.assign({}, defaults), options)); + } + collection(collName, options = {}) { + const idSchema = { + validate(id) { + return typeof id == "string" && RE_RECORD_ID.test(id); + }, + generate() { + return generateUUID() + .toString() + .replace(/[{}]/g, ""); + }, + }; + return super.collection(collName, Object.assign({ idSchema }, options)); + } + } + + return Kinto; + +}))); diff --git a/services/common/kinto-storage-adapter.sys.mjs b/services/common/kinto-storage-adapter.sys.mjs new file mode 100644 index 0000000000..75c6c06a26 --- /dev/null +++ b/services/common/kinto-storage-adapter.sys.mjs @@ -0,0 +1,553 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import { Sqlite } from "resource://gre/modules/Sqlite.sys.mjs"; + +const { Kinto } = ChromeUtils.import( + "resource://services-common/kinto-offline-client.js" +); + +/** + * Filter and sort list against provided filters and order. + * + * @param {Object} filters The filters to apply. + * @param {String} order The order to apply. + * @param {Array} list The list to reduce. + * @return {Array} + */ +function reduceRecords(filters, order, list) { + const filtered = filters ? filterObjects(filters, list) : list; + return order ? sortObjects(order, filtered) : filtered; +} + +/** + * Checks if a value is undefined. + * + * This is a copy of `_isUndefined` from kinto.js/src/utils.js. + * @param {Any} value + * @return {Boolean} + */ +function _isUndefined(value) { + return typeof value === "undefined"; +} + +/** + * Sorts records in a list according to a given ordering. + * + * This is a copy of `sortObjects` from kinto.js/src/utils.js. + * + * @param {String} order The ordering, eg. `-last_modified`. + * @param {Array} list The collection to order. + * @return {Array} + */ +function sortObjects(order, list) { + const hasDash = order[0] === "-"; + const field = hasDash ? order.slice(1) : order; + const direction = hasDash ? -1 : 1; + return list.slice().sort((a, b) => { + if (a[field] && _isUndefined(b[field])) { + return direction; + } + if (b[field] && _isUndefined(a[field])) { + return -direction; + } + if (_isUndefined(a[field]) && _isUndefined(b[field])) { + return 0; + } + return a[field] > b[field] ? direction : -direction; + }); +} + +/** + * Test if a single object matches all given filters. + * + * This is a copy of `filterObject` from kinto.js/src/utils.js. + * + * @param {Object} filters The filters object. + * @param {Object} entry The object to filter. 
+ * @return {Function} + */ +function filterObject(filters, entry) { + return Object.keys(filters).every(filter => { + const value = filters[filter]; + if (Array.isArray(value)) { + return value.some(candidate => candidate === entry[filter]); + } + return entry[filter] === value; + }); +} + +/** + * Filters records in a list matching all given filters. + * + * This is a copy of `filterObjects` from kinto.js/src/utils.js. + * + * @param {Object} filters The filters object. + * @param {Array} list The collection to filter. + * @return {Array} + */ +function filterObjects(filters, list) { + return list.filter(entry => { + return filterObject(filters, entry); + }); +} + +const statements = { + createCollectionData: ` + CREATE TABLE collection_data ( + collection_name TEXT, + record_id TEXT, + record TEXT + );`, + + createCollectionMetadata: ` + CREATE TABLE collection_metadata ( + collection_name TEXT PRIMARY KEY, + last_modified INTEGER, + metadata TEXT + ) WITHOUT ROWID;`, + + createCollectionDataRecordIdIndex: ` + CREATE UNIQUE INDEX unique_collection_record + ON collection_data(collection_name, record_id);`, + + clearData: ` + DELETE FROM collection_data + WHERE collection_name = :collection_name;`, + + createData: ` + INSERT INTO collection_data (collection_name, record_id, record) + VALUES (:collection_name, :record_id, :record);`, + + updateData: ` + INSERT OR REPLACE INTO collection_data (collection_name, record_id, record) + VALUES (:collection_name, :record_id, :record);`, + + deleteData: ` + DELETE FROM collection_data + WHERE collection_name = :collection_name + AND record_id = :record_id;`, + + saveLastModified: ` + INSERT INTO collection_metadata(collection_name, last_modified) + VALUES(:collection_name, :last_modified) + ON CONFLICT(collection_name) DO UPDATE SET last_modified = :last_modified`, + + getLastModified: ` + SELECT last_modified + FROM collection_metadata + WHERE collection_name = :collection_name;`, + + saveMetadata: ` + INSERT INTO 
collection_metadata(collection_name, metadata) + VALUES(:collection_name, :metadata) + ON CONFLICT(collection_name) DO UPDATE SET metadata = :metadata`, + + getMetadata: ` + SELECT metadata + FROM collection_metadata + WHERE collection_name = :collection_name;`, + + getRecord: ` + SELECT record + FROM collection_data + WHERE collection_name = :collection_name + AND record_id = :record_id;`, + + listRecords: ` + SELECT record + FROM collection_data + WHERE collection_name = :collection_name;`, + + // N.B. we have to have a dynamic number of placeholders, which you + // can't do without building your own statement. See `execute` for details + listRecordsById: ` + SELECT record_id, record + FROM collection_data + WHERE collection_name = ? + AND record_id IN `, + + importData: ` + REPLACE INTO collection_data (collection_name, record_id, record) + VALUES (:collection_name, :record_id, :record);`, + + scanAllRecords: `SELECT * FROM collection_data;`, + + clearCollectionMetadata: `DELETE FROM collection_metadata;`, + + calculateStorage: ` + SELECT collection_name, SUM(LENGTH(record)) as size, COUNT(record) as num_records + FROM collection_data + GROUP BY collection_name;`, + + addMetadataColumn: ` + ALTER TABLE collection_metadata + ADD COLUMN metadata TEXT;`, +}; + +const createStatements = [ + "createCollectionData", + "createCollectionMetadata", + "createCollectionDataRecordIdIndex", +]; + +const currentSchemaVersion = 2; + +/** + * Firefox adapter. + * + * Uses Sqlite as a backing store. + * + * Options: + * - sqliteHandle: a handle to the Sqlite database this adapter will + * use as its backing store. To open such a handle, use the + * static openConnection() method. 
+ */ +export class FirefoxAdapter extends Kinto.adapters.BaseAdapter { + constructor(collection, options = {}) { + super(); + const { sqliteHandle = null } = options; + this.collection = collection; + this._connection = sqliteHandle; + this._options = options; + } + + /** + * Initialize a Sqlite connection to be suitable for use with Kinto. + * + * This will be called automatically by open(). + */ + static async _init(connection) { + await connection.executeTransaction(async function doSetup() { + const schema = await connection.getSchemaVersion(); + + if (schema == 0) { + for (let statementName of createStatements) { + await connection.execute(statements[statementName]); + } + await connection.setSchemaVersion(currentSchemaVersion); + } else if (schema == 1) { + await connection.execute(statements.addMetadataColumn); + await connection.setSchemaVersion(currentSchemaVersion); + } else if (schema != 2) { + throw new Error("Unknown database schema: " + schema); + } + }); + return connection; + } + + _executeStatement(statement, params) { + return this._connection.executeCached(statement, params); + } + + /** + * Open and initialize a Sqlite connection to a database that Kinto + * can use. When you are done with this connection, close it by + * calling close(). + * + * Options: + * - path: The path for the Sqlite database + * + * @returns SqliteConnection + */ + static async openConnection(options) { + const opts = Object.assign({}, { sharedMemoryCache: false }, options); + const conn = await Sqlite.openConnection(opts).then(this._init); + try { + Sqlite.shutdown.addBlocker( + "Kinto storage adapter connection closing", + () => conn.close() + ); + } catch (e) { + // It's too late to block shutdown, just close the connection. 
+ await conn.close(); + throw e; + } + return conn; + } + + clear() { + const params = { collection_name: this.collection }; + return this._executeStatement(statements.clearData, params); + } + + execute(callback, options = { preload: [] }) { + let result; + const conn = this._connection; + const collection = this.collection; + + return conn + .executeTransaction(async function doExecuteTransaction() { + // Preload specified records from DB, within transaction. + + // if options.preload has more elements than the sqlite variable + // limit, split it up. + const limit = 100; + let preloaded = {}; + let preload; + let more = options.preload; + + while (more.length) { + preload = more.slice(0, limit); + more = more.slice(limit, more.length); + + const parameters = [collection, ...preload]; + const placeholders = preload.map(_ => "?"); + const stmt = + statements.listRecordsById + "(" + placeholders.join(",") + ");"; + const rows = await conn.execute(stmt, parameters); + + rows.reduce((acc, row) => { + const record = JSON.parse(row.getResultByName("record")); + acc[row.getResultByName("record_id")] = record; + return acc; + }, preloaded); + } + const proxy = transactionProxy(collection, preloaded); + result = callback(proxy); + + for (let { statement, params } of proxy.operations) { + await conn.executeCached(statement, params); + } + }, conn.TRANSACTION_EXCLUSIVE) + .then(_ => result); + } + + get(id) { + const params = { + collection_name: this.collection, + record_id: id, + }; + return this._executeStatement(statements.getRecord, params).then(result => { + if (!result.length) { + return null; + } + return JSON.parse(result[0].getResultByName("record")); + }); + } + + list(params = { filters: {}, order: "" }) { + const parameters = { + collection_name: this.collection, + }; + return this._executeStatement(statements.listRecords, parameters) + .then(result => { + const records = []; + for (let k = 0; k < result.length; k++) { + const row = result[k]; + 
records.push(JSON.parse(row.getResultByName("record"))); + } + return records; + }) + .then(results => { + // The resulting list of records is filtered and sorted. + // XXX: with some efforts, this could be implemented using SQL. + return reduceRecords(params.filters, params.order, results); + }); + } + + async loadDump(records) { + return this.importBulk(records); + } + + /** + * Load a list of records into the local database. + * + * Note: The adapter is not in charge of filtering the already imported + * records. This is done in `Collection#loadDump()`, as a common behaviour + * between every adapters. + * + * @param {Array} records. + * @return {Array} imported records. + */ + async importBulk(records) { + const connection = this._connection; + const collection_name = this.collection; + await connection.executeTransaction(async function doImport() { + for (let record of records) { + const params = { + collection_name, + record_id: record.id, + record: JSON.stringify(record), + }; + await connection.execute(statements.importData, params); + } + const lastModified = Math.max( + ...records.map(record => record.last_modified) + ); + const params = { + collection_name, + }; + const previousLastModified = await connection + .execute(statements.getLastModified, params) + .then(result => { + return result.length + ? 
result[0].getResultByName("last_modified") + : -1; + }); + if (lastModified > previousLastModified) { + const params = { + collection_name, + last_modified: lastModified, + }; + await connection.execute(statements.saveLastModified, params); + } + }); + return records; + } + + saveLastModified(lastModified) { + const parsedLastModified = parseInt(lastModified, 10) || null; + const params = { + collection_name: this.collection, + last_modified: parsedLastModified, + }; + return this._executeStatement(statements.saveLastModified, params).then( + () => parsedLastModified + ); + } + + getLastModified() { + const params = { + collection_name: this.collection, + }; + return this._executeStatement(statements.getLastModified, params).then( + result => { + if (!result.length) { + return 0; + } + return result[0].getResultByName("last_modified"); + } + ); + } + + async saveMetadata(metadata) { + const params = { + collection_name: this.collection, + metadata: JSON.stringify(metadata), + }; + await this._executeStatement(statements.saveMetadata, params); + return metadata; + } + + async getMetadata() { + const params = { + collection_name: this.collection, + }; + const result = await this._executeStatement(statements.getMetadata, params); + if (!result.length) { + return null; + } + return JSON.parse(result[0].getResultByName("metadata")); + } + + calculateStorage() { + return this._executeStatement(statements.calculateStorage, {}).then( + result => { + return Array.from(result, row => ({ + collectionName: row.getResultByName("collection_name"), + size: row.getResultByName("size"), + numRecords: row.getResultByName("num_records"), + })); + } + ); + } + + /** + * Reset the sync status of every record and collection we have + * access to. 
+ */ + resetSyncStatus() { + // We're going to use execute instead of executeCached, so build + // in our own sanity check + if (!this._connection) { + throw new Error("The storage adapter is not open"); + } + + return this._connection.executeTransaction(async function (conn) { + const promises = []; + await conn.execute(statements.scanAllRecords, null, function (row) { + const record = JSON.parse(row.getResultByName("record")); + const record_id = row.getResultByName("record_id"); + const collection_name = row.getResultByName("collection_name"); + if (record._status === "deleted") { + // Garbage collect deleted records. + promises.push( + conn.execute(statements.deleteData, { collection_name, record_id }) + ); + } else { + const newRecord = Object.assign({}, record, { + _status: "created", + last_modified: undefined, + }); + promises.push( + conn.execute(statements.updateData, { + record: JSON.stringify(newRecord), + record_id, + collection_name, + }) + ); + } + }); + await Promise.all(promises); + await conn.execute(statements.clearCollectionMetadata); + }); + } +} + +function transactionProxy(collection, preloaded) { + const _operations = []; + + return { + get operations() { + return _operations; + }, + + create(record) { + _operations.push({ + statement: statements.createData, + params: { + collection_name: collection, + record_id: record.id, + record: JSON.stringify(record), + }, + }); + }, + + update(record) { + _operations.push({ + statement: statements.updateData, + params: { + collection_name: collection, + record_id: record.id, + record: JSON.stringify(record), + }, + }); + }, + + delete(id) { + _operations.push({ + statement: statements.deleteData, + params: { + collection_name: collection, + record_id: id, + }, + }); + }, + + get(id) { + // Gecko JS engine outputs undesired warnings if id is not in preloaded. + return id in preloaded ? 
preloaded[id] : undefined; + }, + }; +} diff --git a/services/common/logmanager.sys.mjs b/services/common/logmanager.sys.mjs new file mode 100644 index 0000000000..724cfde38b --- /dev/null +++ b/services/common/logmanager.sys.mjs @@ -0,0 +1,447 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +"use strict;"; + +import { Log } from "resource://gre/modules/Log.sys.mjs"; + +const lazy = {}; + +ChromeUtils.defineESModuleGetters(lazy, { + FileUtils: "resource://gre/modules/FileUtils.sys.mjs", + NetUtil: "resource://gre/modules/NetUtil.sys.mjs", +}); + +const DEFAULT_MAX_ERROR_AGE = 20 * 24 * 60 * 60; // 20 days + +// "shared" logs (ie, where the same log name is used by multiple LogManager +// instances) are a fact of life here - eg, FirefoxAccounts logs are used by +// both Sync and Reading List. +// However, different instances have different pref branches, so we need to +// handle when one pref branch says "Debug" and the other says "Error" +// So we (a) keep singleton console and dump appenders and (b) keep track +// of the minimum (ie, most verbose) level and use that. +// This avoids (a) the most recent setter winning (as that is indeterminate) +// and (b) multiple dump/console appenders being added to the same log multiple +// times, which would cause messages to appear twice. + +// Singletons used by each instance. +var formatter; +var dumpAppender; +var consoleAppender; + +// A set of all preference roots used by all instances. +var allBranches = new Set(); + +const STREAM_SEGMENT_SIZE = 4096; +const PR_UINT32_MAX = 0xffffffff; + +/** + * Append to an nsIStorageStream + * + * This writes logging output to an in-memory stream which can later be read + * back as an nsIInputStream. It can be used to avoid expensive I/O operations + * during logging. 
Instead, one can periodically consume the input stream and + * e.g. write it to disk asynchronously. + */ +class StorageStreamAppender extends Log.Appender { + constructor(formatter) { + super(formatter); + this._name = "StorageStreamAppender"; + + this._converterStream = null; // holds the nsIConverterOutputStream + this._outputStream = null; // holds the underlying nsIOutputStream + + this._ss = null; + } + + get outputStream() { + if (!this._outputStream) { + // First create a raw stream. We can bail out early if that fails. + this._outputStream = this.newOutputStream(); + if (!this._outputStream) { + return null; + } + + // Wrap the raw stream in an nsIConverterOutputStream. We can reuse + // the instance if we already have one. + if (!this._converterStream) { + this._converterStream = Cc[ + "@mozilla.org/intl/converter-output-stream;1" + ].createInstance(Ci.nsIConverterOutputStream); + } + this._converterStream.init(this._outputStream, "UTF-8"); + } + return this._converterStream; + } + + newOutputStream() { + let ss = (this._ss = Cc["@mozilla.org/storagestream;1"].createInstance( + Ci.nsIStorageStream + )); + ss.init(STREAM_SEGMENT_SIZE, PR_UINT32_MAX, null); + return ss.getOutputStream(0); + } + + getInputStream() { + if (!this._ss) { + return null; + } + return this._ss.newInputStream(0); + } + + reset() { + if (!this._outputStream) { + return; + } + this.outputStream.close(); + this._outputStream = null; + this._ss = null; + } + + doAppend(formatted) { + if (!formatted) { + return; + } + try { + this.outputStream.writeString(formatted + "\n"); + } catch (ex) { + if (ex.result == Cr.NS_BASE_STREAM_CLOSED) { + // The underlying output stream is closed, so let's open a new one + // and try again. + this._outputStream = null; + } + try { + this.outputStream.writeString(formatted + "\n"); + } catch (ex) { + // Ah well, we tried, but something seems to be hosed permanently. + } + } + } +} + +// A storage appender that is flushable to a file on disk. 
Policies for +// when to flush, to what file, log rotation etc are up to the consumer +// (although it does maintain a .sawError property to help the consumer decide +// based on its policies) +class FlushableStorageAppender extends StorageStreamAppender { + constructor(formatter) { + super(formatter); + this.sawError = false; + } + + append(message) { + if (message.level >= Log.Level.Error) { + this.sawError = true; + } + StorageStreamAppender.prototype.append.call(this, message); + } + + reset() { + super.reset(); + this.sawError = false; + } + + // Flush the current stream to a file. Somewhat counter-intuitively, you + // must pass a log which will be written to with details of the operation. + async flushToFile(subdirArray, filename, log) { + let inStream = this.getInputStream(); + this.reset(); + if (!inStream) { + log.debug("Failed to flush log to a file - no input stream"); + return; + } + log.debug("Flushing file log"); + log.trace("Beginning stream copy to " + filename + ": " + Date.now()); + try { + await this._copyStreamToFile(inStream, subdirArray, filename, log); + log.trace("onCopyComplete", Date.now()); + } catch (ex) { + log.error("Failed to copy log stream to file", ex); + } + } + + /** + * Copy an input stream to the named file, doing everything off the main + * thread. + * subDirArray is an array of path components, relative to the profile + * directory, where the file will be created. + * outputFileName is the filename to create. + * Returns a promise that is resolved on completion or rejected with an error. 
+ */ + async _copyStreamToFile(inputStream, subdirArray, outputFileName, log) { + let outputDirectory = PathUtils.join(PathUtils.profileDir, ...subdirArray); + await IOUtils.makeDirectory(outputDirectory); + let fullOutputFileName = PathUtils.join(outputDirectory, outputFileName); + + let outputStream = Cc[ + "@mozilla.org/network/file-output-stream;1" + ].createInstance(Ci.nsIFileOutputStream); + + outputStream.init( + new lazy.FileUtils.File(fullOutputFileName), + -1, + -1, + Ci.nsIFileOutputStream.DEFER_OPEN + ); + + await new Promise(resolve => + lazy.NetUtil.asyncCopy(inputStream, outputStream, () => resolve()) + ); + + outputStream.close(); + log.trace("finished copy to", fullOutputFileName); + } +} + +// The public LogManager object. +export function LogManager(prefRoot, logNames, logFilePrefix) { + this._prefObservers = []; + this.init(prefRoot, logNames, logFilePrefix); +} + +LogManager.StorageStreamAppender = StorageStreamAppender; + +LogManager.prototype = { + _cleaningUpFileLogs: false, + + init(prefRoot, logNames, logFilePrefix) { + this._prefs = Services.prefs.getBranch(prefRoot); + this._prefsBranch = prefRoot; + + this.logFilePrefix = logFilePrefix; + if (!formatter) { + // Create a formatter and various appenders to attach to the logs. + formatter = new Log.BasicFormatter(); + consoleAppender = new Log.ConsoleAppender(formatter); + dumpAppender = new Log.DumpAppender(formatter); + } + + allBranches.add(this._prefsBranch); + // We create a preference observer for all our prefs so they are magically + // reflected if the pref changes after creation. + let setupAppender = ( + appender, + prefName, + defaultLevel, + findSmallest = false + ) => { + let observer = newVal => { + let level = Log.Level[newVal] || defaultLevel; + if (findSmallest) { + // As some of our appenders have global impact (ie, there is only one + // place 'dump' goes to), we need to find the smallest value from all + // prefs controlling this appender. 
+ // For example, if consumerA has dump=Debug then consumerB sets + // dump=Error, we need to keep dump=Debug so consumerA is respected. + for (let branch of allBranches) { + let lookPrefBranch = Services.prefs.getBranch(branch); + let lookVal = + Log.Level[lookPrefBranch.getStringPref(prefName, null)]; + if (lookVal && lookVal < level) { + level = lookVal; + } + } + } + appender.level = level; + }; + this._prefs.addObserver(prefName, observer); + this._prefObservers.push([prefName, observer]); + // and call the observer now with the current pref value. + observer(this._prefs.getStringPref(prefName, null)); + return observer; + }; + + this._observeConsolePref = setupAppender( + consoleAppender, + "log.appender.console", + Log.Level.Fatal, + true + ); + this._observeDumpPref = setupAppender( + dumpAppender, + "log.appender.dump", + Log.Level.Error, + true + ); + + // The file appender doesn't get the special singleton behaviour. + let fapp = (this._fileAppender = new FlushableStorageAppender(formatter)); + // the stream gets a default of Debug as the user must go out of their way + // to see the stuff spewed to it. + this._observeStreamPref = setupAppender( + fapp, + "log.appender.file.level", + Log.Level.Debug + ); + + // now attach the appenders to all our logs. + for (let logName of logNames) { + let log = Log.repository.getLogger(logName); + for (let appender of [fapp, dumpAppender, consoleAppender]) { + log.addAppender(appender); + } + } + // and use the first specified log as a "root" for our log. 
// This returns an array of the relative directory entries below the
+ if (!flushToFile) { + this._fileAppender.reset(); + return null; + } + + // We have reasonPrefix at the start of the filename so all "error" + // logs are grouped in about:sync-log. + let filename = + reasonPrefix + "-" + this.logFilePrefix + "-" + Date.now() + ".txt"; + await this._fileAppender.flushToFile( + this._logFileSubDirectoryEntries, + filename, + this._log + ); + // It's not completely clear to markh why we only do log cleanups + // for errors, but for now the Sync semantics have been copied... + // (one theory is that only cleaning up on error makes it less + // likely old error logs would be removed, but that's not true if + // there are occasional errors - let's address this later!) + if (reason == this.ERROR_LOG_WRITTEN && !this._cleaningUpFileLogs) { + this._log.trace("Running cleanup."); + try { + await this.cleanupLogs(); + } catch (err) { + this._log.error("Failed to cleanup logs", err); + } + } + return reason; + } catch (ex) { + this._log.error("Failed to resetFileLog", ex); + return null; + } + }, + + /** + * Finds all logs older than maxErrorAge and deletes them using async I/O. + */ + cleanupLogs() { + let maxAge = this._prefs.getIntPref( + "log.appender.file.maxErrorAge", + DEFAULT_MAX_ERROR_AGE + ); + let threshold = Date.now() - 1000 * maxAge; + this._log.debug("Log cleanup threshold time: " + threshold); + + let shouldDelete = fileInfo => { + return fileInfo.lastModified < threshold; + }; + return this._deleteLogFiles(shouldDelete); + }, + + /** + * Finds all logs and removes them. + */ + removeAllLogs() { + return this._deleteLogFiles(() => true); + }, + + // Delete some log files. A callback is invoked for each found log file to + // determine if that file should be removed. 
+ async _deleteLogFiles(cbShouldDelete) { + this._cleaningUpFileLogs = true; + let logDir = lazy.FileUtils.getDir( + "ProfD", + this._logFileSubDirectoryEntries + ); + for (const path of await IOUtils.getChildren(logDir.path)) { + const name = PathUtils.filename(path); + + if (!name.startsWith("error-") && !name.startsWith("success-")) { + continue; + } + + try { + const info = await IOUtils.stat(path); + if (!cbShouldDelete(info)) { + continue; + } + + this._log.trace(` > Cleanup removing ${name} (${info.lastModified})`); + await IOUtils.remove(path); + this._log.trace(`Deleted ${name}`); + } catch (ex) { + this._log.debug( + `Encountered error trying to clean up old log file ${name}`, + ex + ); + } + } + this._cleaningUpFileLogs = false; + this._log.debug("Done deleting files."); + // This notification is used only for tests. + Services.obs.notifyObservers( + null, + "services-tests:common:log-manager:cleanup-logs" + ); + }, +}; diff --git a/services/common/modules-testing/logging.sys.mjs b/services/common/modules-testing/logging.sys.mjs new file mode 100644 index 0000000000..63bd306b67 --- /dev/null +++ b/services/common/modules-testing/logging.sys.mjs @@ -0,0 +1,56 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +import { Log } from "resource://gre/modules/Log.sys.mjs"; + +export function initTestLogging(level) { + function LogStats() { + this.errorsLogged = 0; + } + LogStats.prototype = { + format: function format(message) { + if (message.level == Log.Level.Error) { + this.errorsLogged += 1; + } + + return ( + message.time + + "\t" + + message.loggerName + + "\t" + + message.levelDesc + + "\t" + + this.formatText(message) + + "\n" + ); + }, + }; + Object.setPrototypeOf(LogStats.prototype, new Log.BasicFormatter()); + + let log = Log.repository.rootLogger; + let logStats = new LogStats(); + let appender = new Log.DumpAppender(logStats); + + if (typeof level == "undefined") { + level = "Debug"; + } + getTestLogger().level = Log.Level[level]; + Log.repository.getLogger("Services").level = Log.Level[level]; + + log.level = Log.Level.Trace; + appender.level = Log.Level.Trace; + // Overwrite any other appenders (e.g. from previous incarnations) + log.ownAppenders = [appender]; + log.updateAppenders(); + + // SQLite logging is noisy in these tests - we make it quiet by default + // (although individual tests are free to bump it later) + Log.repository.getLogger("Sqlite").level = Log.Level.Info; + + return logStats; +} + +export function getTestLogger(component) { + return Log.repository.getLogger("Testing"); +} diff --git a/services/common/moz.build b/services/common/moz.build new file mode 100644 index 0000000000..3585dc5426 --- /dev/null +++ b/services/common/moz.build @@ -0,0 +1,47 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +with Files("**"): + BUG_COMPONENT = ("Cloud Services", "Firefox: Common") + +TEST_DIRS += ["tests"] + +EXPORTS.mozilla.appservices += [ + "app_services_logger/AppServicesLoggerComponents.h", +] + +EXTRA_COMPONENTS += [ + "servicesComponents.manifest", +] + +EXTRA_JS_MODULES["services-common"] += [ + "async.sys.mjs", + "kinto-http-client.sys.mjs", + "kinto-offline-client.js", + "kinto-storage-adapter.sys.mjs", + "logmanager.sys.mjs", + "observers.sys.mjs", + "rest.sys.mjs", + "uptake-telemetry.sys.mjs", + "utils.sys.mjs", +] + +if CONFIG["MOZ_WIDGET_TOOLKIT"] != "android": + EXTRA_JS_MODULES["services-common"] += [ + "hawkclient.sys.mjs", + "hawkrequest.sys.mjs", + "tokenserverclient.sys.mjs", + ] + +TESTING_JS_MODULES.services.common += [ + "modules-testing/logging.sys.mjs", +] + +SPHINX_TREES["/services/common"] = "docs" + +XPCOM_MANIFESTS += [ + "app_services_logger/components.conf", +] diff --git a/services/common/observers.sys.mjs b/services/common/observers.sys.mjs new file mode 100644 index 0000000000..c79b2b1bf2 --- /dev/null +++ b/services/common/observers.sys.mjs @@ -0,0 +1,148 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** + * A service for adding, removing and notifying observers of notifications. + * Wraps the nsIObserverService interface. + * + * @version 0.2 + */ +export var Observers = { + /** + * Register the given callback as an observer of the given topic. 
+ * + * @param topic {String} + * the topic to observe + * + * @param callback {Object} + * the callback; an Object that implements nsIObserver or a Function + * that gets called when the notification occurs + * + * @param thisObject {Object} [optional] + * the object to use as |this| when calling a Function callback + * + * @returns the observer + */ + add(topic, callback, thisObject) { + let observer = new Observer(topic, callback, thisObject); + this._cache.push(observer); + Services.obs.addObserver(observer, topic, true); + + return observer; + }, + + /** + * Unregister the given callback as an observer of the given topic. + * + * @param topic {String} + * the topic being observed + * + * @param callback {Object} + * the callback doing the observing + * + * @param thisObject {Object} [optional] + * the object being used as |this| when calling a Function callback + */ + remove(topic, callback, thisObject) { + // This seems fairly inefficient, but I'm not sure how much better + // we can make it. We could index by topic, but we can't index by callback + // or thisObject, as far as I know, since the keys to JavaScript hashes + // (a.k.a. objects) can apparently only be primitive values. + let [observer] = this._cache.filter( + v => + v.topic == topic && v.callback == callback && v.thisObject == thisObject + ); + if (observer) { + Services.obs.removeObserver(observer, topic); + this._cache.splice(this._cache.indexOf(observer), 1); + } else { + throw new Error("Attempt to remove non-existing observer"); + } + }, + + /** + * Notify observers about something. 
+ * + * @param topic {String} + * the topic to notify observers about + * + * @param subject {Object} [optional] + * some information about the topic; can be any JS object or primitive + * + * @param data {String} [optional] [deprecated] + * some more information about the topic; deprecated as the subject + * is sufficient to pass all needed information to the JS observers + * that this module targets; if you have multiple values to pass to + * the observer, wrap them in an object and pass them via the subject + * parameter (i.e.: { foo: 1, bar: "some string", baz: myObject }) + */ + notify(topic, subject, data) { + subject = typeof subject == "undefined" ? null : new Subject(subject); + data = typeof data == "undefined" ? null : data; + Services.obs.notifyObservers(subject, topic, data); + }, + + /** + * A cache of observers that have been added. + * + * We use this to remove observers when a caller calls |remove|. + * + * XXX This might result in reference cycles, causing memory leaks, + * if we hold a reference to an observer that holds a reference to us. + * Could we fix that by making this an independent top-level object + * rather than a property of this object? + */ + _cache: [], +}; + +function Observer(topic, callback, thisObject) { + this.topic = topic; + this.callback = callback; + this.thisObject = thisObject; +} + +Observer.prototype = { + QueryInterface: ChromeUtils.generateQI([ + "nsIObserver", + "nsISupportsWeakReference", + ]), + observe(subject, topic, data) { + // Extract the wrapped object for subjects that are one of our wrappers + // around a JS object. This way we support both wrapped subjects created + // using this module and those that are real XPCOM components. 
+ if ( + subject && + typeof subject == "object" && + "wrappedJSObject" in subject && + "observersModuleSubjectWrapper" in subject.wrappedJSObject + ) { + subject = subject.wrappedJSObject.object; + } + + if (typeof this.callback == "function") { + if (this.thisObject) { + this.callback.call(this.thisObject, subject, data); + } else { + this.callback(subject, data); + } + } else { + // typeof this.callback == "object" (nsIObserver) + this.callback.observe(subject, topic, data); + } + }, +}; + +function Subject(object) { + // Double-wrap the object and set a property identifying the wrappedJSObject + // as one of our wrappers to distinguish between subjects that are one of our + // wrappers (which we should unwrap when notifying our observers) and those + // that are real JS XPCOM components (which we should pass through unaltered). + this.wrappedJSObject = { observersModuleSubjectWrapper: true, object }; +} + +Subject.prototype = { + QueryInterface: ChromeUtils.generateQI([]), + getScriptableHelper() {}, + getInterfaces() {}, +}; diff --git a/services/common/rest.sys.mjs b/services/common/rest.sys.mjs new file mode 100644 index 0000000000..ed32047bcd --- /dev/null +++ b/services/common/rest.sys.mjs @@ -0,0 +1,720 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { NetUtil } from "resource://gre/modules/NetUtil.sys.mjs"; + +import { Log } from "resource://gre/modules/Log.sys.mjs"; + +import { CommonUtils } from "resource://services-common/utils.sys.mjs"; + +const lazy = {}; + +ChromeUtils.defineESModuleGetters(lazy, { + CryptoUtils: "resource://services-crypto/utils.sys.mjs", +}); + +function decodeString(data, charset) { + if (!data || !charset) { + return data; + } + + // This could be simpler if we assumed the charset is only ever UTF-8. 
+ // It's unclear to me how willing we are to assume this, though... + let stringStream = Cc["@mozilla.org/io/string-input-stream;1"].createInstance( + Ci.nsIStringInputStream + ); + stringStream.setData(data, data.length); + + let converterStream = Cc[ + "@mozilla.org/intl/converter-input-stream;1" + ].createInstance(Ci.nsIConverterInputStream); + + converterStream.init( + stringStream, + charset, + 0, + converterStream.DEFAULT_REPLACEMENT_CHARACTER + ); + + let remaining = data.length; + let body = ""; + while (remaining > 0) { + let str = {}; + let num = converterStream.readString(remaining, str); + if (!num) { + break; + } + remaining -= num; + body += str.value; + } + return body; +} + +/** + * Single use HTTP requests to RESTish resources. + * + * @param uri + * URI for the request. This can be an nsIURI object or a string + * that can be used to create one. An exception will be thrown if + * the string is not a valid URI. + * + * Examples: + * + * (1) Quick GET request: + * + * let response = await new RESTRequest("http://server/rest/resource").get(); + * if (!response.success) { + * // Bail out if we're not getting an HTTP 2xx code. + * processHTTPError(response.status); + * return; + * } + * processData(response.body); + * + * (2) Quick PUT request (non-string data is automatically JSONified) + * + * let response = await new RESTRequest("http://server/rest/resource").put(data); + */ +export function RESTRequest(uri) { + this.status = this.NOT_SENT; + + // If we don't have an nsIURI object yet, make one. This will throw if + // 'uri' isn't a valid URI string. 
+ if (!(uri instanceof Ci.nsIURI)) { + uri = Services.io.newURI(uri); + } + this.uri = uri; + + this._headers = {}; + this._deferred = Promise.withResolvers(); + this._log = Log.repository.getLogger(this._logName); + this._log.manageLevelFromPref("services.common.log.logger.rest.request"); +} + +RESTRequest.prototype = { + _logName: "Services.Common.RESTRequest", + + QueryInterface: ChromeUtils.generateQI([ + "nsIInterfaceRequestor", + "nsIChannelEventSink", + ]), + + /** Public API: **/ + + /** + * URI for the request (an nsIURI object). + */ + uri: null, + + /** + * HTTP method (e.g. "GET") + */ + method: null, + + /** + * RESTResponse object + */ + response: null, + + /** + * nsIRequest load flags. Don't do any caching by default. Don't send user + * cookies and such over the wire (Bug 644734). + */ + loadFlags: + Ci.nsIRequest.LOAD_BYPASS_CACHE | + Ci.nsIRequest.INHIBIT_CACHING | + Ci.nsIRequest.LOAD_ANONYMOUS, + + /** + * nsIHttpChannel + */ + channel: null, + + /** + * Flag to indicate the status of the request. + * + * One of NOT_SENT, SENT, IN_PROGRESS, COMPLETED, ABORTED. + */ + status: null, + + NOT_SENT: 0, + SENT: 1, + IN_PROGRESS: 2, + COMPLETED: 4, + ABORTED: 8, + + /** + * HTTP status text of response + */ + statusText: null, + + /** + * Request timeout (in seconds, though decimal values can be used for + * up to millisecond granularity.) + * + * 0 for no timeout. Default is 300 seconds (5 minutes), the same as Sync uses + * in resource.js. + */ + timeout: 300, + + /** + * The encoding with which the response to this request must be treated. + * If a charset parameter is available in the HTTP Content-Type header for + * this response, that will always be used, and this value is ignored. We + * default to UTF-8 because that is a reasonable default. + */ + charset: "utf-8", + + /** + * Set a request header. + */ + setHeader(name, value) { + this._headers[name.toLowerCase()] = value; + }, + + /** + * Perform an HTTP GET. 
+ * + * @return Promise<RESTResponse> + */ + async get() { + return this.dispatch("GET", null); + }, + + /** + * Perform an HTTP PATCH. + * + * @param data + * Data to be used as the request body. If this isn't a string + * it will be JSONified automatically. + * + * @return Promise<RESTResponse> + */ + async patch(data) { + return this.dispatch("PATCH", data); + }, + + /** + * Perform an HTTP PUT. + * + * @param data + * Data to be used as the request body. If this isn't a string + * it will be JSONified automatically. + * + * @return Promise<RESTResponse> + */ + async put(data) { + return this.dispatch("PUT", data); + }, + + /** + * Perform an HTTP POST. + * + * @param data + * Data to be used as the request body. If this isn't a string + * it will be JSONified automatically. + * + * @return Promise<RESTResponse> + */ + async post(data) { + return this.dispatch("POST", data); + }, + + /** + * Perform an HTTP DELETE. + * + * @return Promise<RESTResponse> + */ + async delete() { + return this.dispatch("DELETE", null); + }, + + /** + * Abort an active request. + */ + abort(rejectWithError = null) { + if (this.status != this.SENT && this.status != this.IN_PROGRESS) { + throw new Error("Can only abort a request that has been sent."); + } + + this.status = this.ABORTED; + this.channel.cancel(Cr.NS_BINDING_ABORTED); + + if (this.timeoutTimer) { + // Clear the abort timer now that the channel is done. + this.timeoutTimer.clear(); + } + if (rejectWithError) { + this._deferred.reject(rejectWithError); + } + }, + + /** Implementation stuff **/ + + async dispatch(method, data) { + if (this.status != this.NOT_SENT) { + throw new Error("Request has already been sent!"); + } + + this.method = method; + + // Create and initialize HTTP channel. 
+ let channel = NetUtil.newChannel({ + uri: this.uri, + loadUsingSystemPrincipal: true, + }) + .QueryInterface(Ci.nsIRequest) + .QueryInterface(Ci.nsIHttpChannel); + this.channel = channel; + channel.loadFlags |= this.loadFlags; + channel.notificationCallbacks = this; + + this._log.debug(`${method} request to ${this.uri.spec}`); + // Set request headers. + let headers = this._headers; + for (let key in headers) { + if (key == "authorization" || key == "x-client-state") { + this._log.trace("HTTP Header " + key + ": ***** (suppressed)"); + } else { + this._log.trace("HTTP Header " + key + ": " + headers[key]); + } + channel.setRequestHeader(key, headers[key], false); + } + + // REST requests accept JSON by default + if (!headers.accept) { + channel.setRequestHeader( + "accept", + "application/json;q=0.9,*/*;q=0.2", + false + ); + } + + // Set HTTP request body. + if (method == "PUT" || method == "POST" || method == "PATCH") { + // Convert non-string bodies into JSON with utf-8 encoding. If a string + // is passed we assume they've already encoded it. + let contentType = headers["content-type"]; + if (typeof data != "string") { + data = JSON.stringify(data); + if (!contentType) { + contentType = "application/json"; + } + if (!contentType.includes("charset")) { + data = CommonUtils.encodeUTF8(data); + contentType += "; charset=utf-8"; + } else { + // If someone handed us an object but also a custom content-type + // it's probably confused. We could go to even further lengths to + // respect it, but this shouldn't happen in practice. + console.error( + "rest.js found an object to JSON.stringify but also a " + + "content-type header with a charset specification. 
" + + "This probably isn't going to do what you expect" + ); + } + } + if (!contentType) { + contentType = "text/plain"; + } + + this._log.debug(method + " Length: " + data.length); + if (this._log.level <= Log.Level.Trace) { + this._log.trace(method + " Body: " + data); + } + + let stream = Cc["@mozilla.org/io/string-input-stream;1"].createInstance( + Ci.nsIStringInputStream + ); + stream.setData(data, data.length); + + channel.QueryInterface(Ci.nsIUploadChannel); + channel.setUploadStream(stream, contentType, data.length); + } + // We must set this after setting the upload stream, otherwise it + // will always be 'PUT'. Yeah, I know. + channel.requestMethod = method; + + // Before opening the channel, set the charset that serves as a hint + // as to what the response might be encoded as. + channel.contentCharset = this.charset; + + // Blast off! + try { + channel.asyncOpen(this); + } catch (ex) { + // asyncOpen can throw in a bunch of cases -- e.g., a forbidden port. + this._log.warn("Caught an error in asyncOpen", ex); + this._deferred.reject(ex); + } + this.status = this.SENT; + this.delayTimeout(); + return this._deferred.promise; + }, + + /** + * Create or push back the abort timer that kills this request. + */ + delayTimeout() { + if (this.timeout) { + CommonUtils.namedTimer( + this.abortTimeout, + this.timeout * 1000, + this, + "timeoutTimer" + ); + } + }, + + /** + * Abort the request based on a timeout. + */ + abortTimeout() { + this.abort( + Components.Exception( + "Aborting due to channel inactivity.", + Cr.NS_ERROR_NET_TIMEOUT + ) + ); + }, + + /** nsIStreamListener **/ + + onStartRequest(channel) { + if (this.status == this.ABORTED) { + this._log.trace( + "Not proceeding with onStartRequest, request was aborted." + ); + // We might have already rejected, but just in case. 
+ this._deferred.reject( + Components.Exception("Request aborted", Cr.NS_BINDING_ABORTED) + ); + return; + } + + try { + channel.QueryInterface(Ci.nsIHttpChannel); + } catch (ex) { + this._log.error("Unexpected error: channel is not a nsIHttpChannel!"); + this.status = this.ABORTED; + channel.cancel(Cr.NS_BINDING_ABORTED); + this._deferred.reject(ex); + return; + } + + this.status = this.IN_PROGRESS; + + this._log.trace( + "onStartRequest: " + channel.requestMethod + " " + channel.URI.spec + ); + + // Create a new response object. + this.response = new RESTResponse(this); + + this.delayTimeout(); + }, + + onStopRequest(channel, statusCode) { + if (this.timeoutTimer) { + // Clear the abort timer now that the channel is done. + this.timeoutTimer.clear(); + } + + // We don't want to do anything for a request that's already been aborted. + if (this.status == this.ABORTED) { + this._log.trace( + "Not proceeding with onStopRequest, request was aborted." + ); + // We might not have already rejected if the user called reject() manually. 
+ // If we have already rejected, then this is a no-op + this._deferred.reject( + Components.Exception("Request aborted", Cr.NS_BINDING_ABORTED) + ); + return; + } + + try { + channel.QueryInterface(Ci.nsIHttpChannel); + } catch (ex) { + this._log.error("Unexpected error: channel not nsIHttpChannel!"); + this.status = this.ABORTED; + this._deferred.reject(ex); + return; + } + + this.status = this.COMPLETED; + + try { + this.response.body = decodeString( + this.response._rawBody, + this.response.charset + ); + this.response._rawBody = null; + } catch (ex) { + this._log.warn( + `Exception decoding response - ${this.method} ${channel.URI.spec}`, + ex + ); + this._deferred.reject(ex); + return; + } + + let statusSuccess = Components.isSuccessCode(statusCode); + let uri = (channel && channel.URI && channel.URI.spec) || "<unknown>"; + this._log.trace( + "Channel for " + + channel.requestMethod + + " " + + uri + + " returned status code " + + statusCode + ); + + // Throw the failure code and stop execution. Use Components.Exception() + // instead of Error() so the exception is QI-able and can be passed across + // XPCOM borders while preserving the status code. + if (!statusSuccess) { + let message = Components.Exception("", statusCode).name; + let error = Components.Exception(message, statusCode); + this._log.debug( + this.method + " " + uri + " failed: " + statusCode + " - " + message + ); + // Additionally give the full response body when Trace logging. + if (this._log.level <= Log.Level.Trace) { + this._log.trace(this.method + " body", this.response.body); + } + this._deferred.reject(error); + return; + } + + this._log.debug(this.method + " " + uri + " " + this.response.status); + + // Note that for privacy/security reasons we don't log this response body + + delete this._inputStream; + + this._deferred.resolve(this.response); + }, + + onDataAvailable(channel, stream, off, count) { + // We get an nsIRequest, which doesn't have contentCharset. 
+ try { + channel.QueryInterface(Ci.nsIHttpChannel); + } catch (ex) { + this._log.error("Unexpected error: channel not nsIHttpChannel!"); + this.abort(ex); + return; + } + + if (channel.contentCharset) { + this.response.charset = channel.contentCharset; + } else { + this.response.charset = null; + } + + if (!this._inputStream) { + this._inputStream = Cc[ + "@mozilla.org/scriptableinputstream;1" + ].createInstance(Ci.nsIScriptableInputStream); + } + this._inputStream.init(stream); + + this.response._rawBody += this._inputStream.read(count); + + this.delayTimeout(); + }, + + /** nsIInterfaceRequestor **/ + + getInterface(aIID) { + return this.QueryInterface(aIID); + }, + + /** + * Returns true if headers from the old channel should be + * copied to the new channel. Invoked when a channel redirect + * is in progress. + */ + shouldCopyOnRedirect(oldChannel, newChannel, flags) { + let isInternal = !!(flags & Ci.nsIChannelEventSink.REDIRECT_INTERNAL); + let isSameURI = newChannel.URI.equals(oldChannel.URI); + this._log.debug( + "Channel redirect: " + + oldChannel.URI.spec + + ", " + + newChannel.URI.spec + + ", internal = " + + isInternal + ); + return isInternal && isSameURI; + }, + + /** nsIChannelEventSink **/ + asyncOnChannelRedirect(oldChannel, newChannel, flags, callback) { + let oldSpec = + oldChannel && oldChannel.URI ? oldChannel.URI.spec : "<undefined>"; + let newSpec = + newChannel && newChannel.URI ? newChannel.URI.spec : "<undefined>"; + this._log.debug( + "Channel redirect: " + oldSpec + ", " + newSpec + ", " + flags + ); + + try { + newChannel.QueryInterface(Ci.nsIHttpChannel); + } catch (ex) { + this._log.error("Unexpected error: channel not nsIHttpChannel!"); + callback.onRedirectVerifyCallback(Cr.NS_ERROR_NO_INTERFACE); + return; + } + + // For internal redirects, copy the headers that our caller set. 
+ try { + if (this.shouldCopyOnRedirect(oldChannel, newChannel, flags)) { + this._log.trace("Copying headers for safe internal redirect."); + for (let key in this._headers) { + newChannel.setRequestHeader(key, this._headers[key], false); + } + } + } catch (ex) { + this._log.error("Error copying headers", ex); + } + + this.channel = newChannel; + + // We let all redirects proceed. + callback.onRedirectVerifyCallback(Cr.NS_OK); + }, +}; + +/** + * Response object for a RESTRequest. This will be created automatically by + * the RESTRequest. + */ +export function RESTResponse(request = null) { + this.body = ""; + this._rawBody = ""; + this.request = request; + this._log = Log.repository.getLogger(this._logName); + this._log.manageLevelFromPref("services.common.log.logger.rest.response"); +} + +RESTResponse.prototype = { + _logName: "Services.Common.RESTResponse", + + /** + * Corresponding REST request + */ + request: null, + + /** + * HTTP status code + */ + get status() { + let status; + try { + status = this.request.channel.responseStatus; + } catch (ex) { + this._log.debug("Caught exception fetching HTTP status code", ex); + return null; + } + Object.defineProperty(this, "status", { value: status }); + return status; + }, + + /** + * HTTP status text + */ + get statusText() { + let statusText; + try { + statusText = this.request.channel.responseStatusText; + } catch (ex) { + this._log.debug("Caught exception fetching HTTP status text", ex); + return null; + } + Object.defineProperty(this, "statusText", { value: statusText }); + return statusText; + }, + + /** + * Boolean flag that indicates whether the HTTP status code is 2xx or not. 
+ */ + get success() { + let success; + try { + success = this.request.channel.requestSucceeded; + } catch (ex) { + this._log.debug("Caught exception fetching HTTP success flag", ex); + return null; + } + Object.defineProperty(this, "success", { value: success }); + return success; + }, + + /** + * Object containing HTTP headers (keyed as lower case) + */ + get headers() { + let headers = {}; + try { + this._log.trace("Processing response headers."); + let channel = this.request.channel.QueryInterface(Ci.nsIHttpChannel); + channel.visitResponseHeaders(function (header, value) { + headers[header.toLowerCase()] = value; + }); + } catch (ex) { + this._log.debug("Caught exception processing response headers", ex); + return null; + } + + Object.defineProperty(this, "headers", { value: headers }); + return headers; + }, + + /** + * HTTP body (string) + */ + body: null, +}; + +/** + * Single use MAC authenticated HTTP requests to RESTish resources. + * + * @param uri + * URI going to the RESTRequest constructor. + * @param authToken + * (Object) An auth token of the form {id: (string), key: (string)} + * from which the MAC Authentication header for this request will be + * derived. A token as obtained from + * TokenServerClient.getTokenUsingOAuth is accepted. + * @param extra + * (Object) Optional extra parameters. Valid keys are: nonce_bytes, ts, + * nonce, and ext. See CrytoUtils.computeHTTPMACSHA1 for information on + * the purpose of these values. 
+ */ +export function TokenAuthenticatedRESTRequest(uri, authToken, extra) { + RESTRequest.call(this, uri); + this.authToken = authToken; + this.extra = extra || {}; +} + +TokenAuthenticatedRESTRequest.prototype = { + async dispatch(method, data) { + let sig = await lazy.CryptoUtils.computeHTTPMACSHA1( + this.authToken.id, + this.authToken.key, + method, + this.uri, + this.extra + ); + + this.setHeader("Authorization", sig.getHeader()); + + return super.dispatch(method, data); + }, +}; + +Object.setPrototypeOf( + TokenAuthenticatedRESTRequest.prototype, + RESTRequest.prototype +); diff --git a/services/common/servicesComponents.manifest b/services/common/servicesComponents.manifest new file mode 100644 index 0000000000..fe2a52fab6 --- /dev/null +++ b/services/common/servicesComponents.manifest @@ -0,0 +1,2 @@ +# Register resource aliases +resource services-common resource://gre/modules/services-common/ diff --git a/services/common/tests/moz.build b/services/common/tests/moz.build new file mode 100644 index 0000000000..77d3622318 --- /dev/null +++ b/services/common/tests/moz.build @@ -0,0 +1,9 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +XPCSHELL_TESTS_MANIFESTS += ["unit/xpcshell.toml"] + +TEST_DIRS += ["unit"] diff --git a/services/common/tests/unit/head_global.js b/services/common/tests/unit/head_global.js new file mode 100644 index 0000000000..bcb1c063a4 --- /dev/null +++ b/services/common/tests/unit/head_global.js @@ -0,0 +1,35 @@ +/* Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/publicdomain/zero/1.0/ */ + +var Cm = Components.manager; + +// Required to avoid failures. 
+do_get_profile(); +var { XPCOMUtils } = ChromeUtils.importESModule( + "resource://gre/modules/XPCOMUtils.sys.mjs" +); + +const { updateAppInfo } = ChromeUtils.importESModule( + "resource://testing-common/AppInfo.sys.mjs" +); +updateAppInfo({ + name: "XPCShell", + ID: "xpcshell@tests.mozilla.org", + version: "1", + platformVersion: "", +}); + +function addResourceAlias() { + const handler = Services.io + .getProtocolHandler("resource") + .QueryInterface(Ci.nsIResProtocolHandler); + + let modules = ["common", "crypto", "settings"]; + for (let module of modules) { + let uri = Services.io.newURI( + "resource://gre/modules/services-" + module + "/" + ); + handler.setSubstitution("services-" + module, uri); + } +} +addResourceAlias(); diff --git a/services/common/tests/unit/head_helpers.js b/services/common/tests/unit/head_helpers.js new file mode 100644 index 0000000000..bd994ee71a --- /dev/null +++ b/services/common/tests/unit/head_helpers.js @@ -0,0 +1,270 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +/* import-globals-from head_global.js */ + +var { Log } = ChromeUtils.importESModule("resource://gre/modules/Log.sys.mjs"); +var { CommonUtils } = ChromeUtils.importESModule( + "resource://services-common/utils.sys.mjs" +); +var { + HTTP_400, + HTTP_401, + HTTP_402, + HTTP_403, + HTTP_404, + HTTP_405, + HTTP_406, + HTTP_407, + HTTP_408, + HTTP_409, + HTTP_410, + HTTP_411, + HTTP_412, + HTTP_413, + HTTP_414, + HTTP_415, + HTTP_417, + HTTP_500, + HTTP_501, + HTTP_502, + HTTP_503, + HTTP_504, + HTTP_505, + HttpError, + HttpServer, +} = ChromeUtils.importESModule("resource://testing-common/httpd.sys.mjs"); +var { getTestLogger, initTestLogging } = ChromeUtils.importESModule( + "resource://testing-common/services/common/logging.sys.mjs" +); +var { MockRegistrar } = ChromeUtils.importESModule( + "resource://testing-common/MockRegistrar.sys.mjs" +); +var { NetUtil } = ChromeUtils.importESModule( + "resource://gre/modules/NetUtil.sys.mjs" +); + +function do_check_empty(obj) { + do_check_attribute_count(obj, 0); +} + +function do_check_attribute_count(obj, c) { + Assert.equal(c, Object.keys(obj).length); +} + +function do_check_throws(aFunc, aResult) { + try { + aFunc(); + } catch (e) { + Assert.equal(e.result, aResult); + return; + } + do_throw("Expected result " + aResult + ", none thrown."); +} + +/** + * Test whether specified function throws exception with expected + * result. + * + * @param func + * Function to be tested. + * @param message + * Message of expected exception. <code>null</code> for no throws. + */ +function do_check_throws_message(aFunc, aResult) { + try { + aFunc(); + } catch (e) { + Assert.equal(e.message, aResult); + return; + } + do_throw("Expected an error, none thrown."); +} + +/** + * Print some debug message to the console. All arguments will be printed, + * separated by spaces. + * + * @param [arg0, arg1, arg2, ...] 
+ * Any number of arguments to print out + * @usage _("Hello World") -> prints "Hello World" + * @usage _(1, 2, 3) -> prints "1 2 3" + */ +var _ = function (some, debug, text, to) { + print(Array.from(arguments).join(" ")); +}; + +function httpd_setup(handlers, port = -1) { + let server = new HttpServer(); + for (let path in handlers) { + server.registerPathHandler(path, handlers[path]); + } + try { + server.start(port); + } catch (ex) { + _("=========================================="); + _("Got exception starting HTTP server on port " + port); + _("Error: " + Log.exceptionStr(ex)); + _("Is there a process already listening on port " + port + "?"); + _("=========================================="); + do_throw(ex); + } + + // Set the base URI for convenience. + let i = server.identity; + server.baseURI = + i.primaryScheme + "://" + i.primaryHost + ":" + i.primaryPort; + + return server; +} + +function httpd_handler(statusCode, status, body) { + return function handler(request, response) { + _("Processing request"); + // Allow test functions to inspect the request. + request.body = readBytesFromInputStream(request.bodyInputStream); + handler.request = request; + + response.setStatusLine(request.httpVersion, statusCode, status); + if (body) { + response.bodyOutputStream.write(body, body.length); + } + }; +} + +function promiseStopServer(server) { + return new Promise(resolve => server.stop(resolve)); +} + +/* + * Read bytes string from an nsIInputStream. If 'count' is omitted, + * all available input is read. 
+ */ +function readBytesFromInputStream(inputStream, count) { + if (!count) { + count = inputStream.available(); + } + if (!count) { + return ""; + } + return NetUtil.readInputStreamToString(inputStream, count, { + charset: "UTF-8", + }); +} + +function writeBytesToOutputStream(outputStream, string) { + if (!string) { + return; + } + let converter = Cc[ + "@mozilla.org/intl/converter-output-stream;1" + ].createInstance(Ci.nsIConverterOutputStream); + converter.init(outputStream, "UTF-8"); + converter.writeString(string); + converter.close(); +} + +/* + * Ensure exceptions from inside callbacks leads to test failures. + */ +function ensureThrows(func) { + return function () { + try { + func.apply(this, arguments); + } catch (ex) { + do_throw(ex); + } + }; +} + +/** + * Proxy auth helpers. + */ + +/** + * Fake a PAC to prompt a channel replacement. + */ +var PACSystemSettings = { + QueryInterface: ChromeUtils.generateQI(["nsISystemProxySettings"]), + + // Replace this URI for each test to avoid caching. We want to ensure that + // each test gets a completely fresh setup. + mainThreadOnly: true, + PACURI: null, + getProxyForURI: function getProxyForURI(aURI) { + throw Components.Exception("", Cr.NS_ERROR_NOT_IMPLEMENTED); + }, +}; + +var fakePACCID; +function installFakePAC() { + _("Installing fake PAC."); + fakePACCID = MockRegistrar.register( + "@mozilla.org/system-proxy-settings;1", + PACSystemSettings + ); +} + +function uninstallFakePAC() { + _("Uninstalling fake PAC."); + MockRegistrar.unregister(fakePACCID); +} + +function getUptakeTelemetrySnapshot(component, source) { + const TELEMETRY_CATEGORY_ID = "uptake.remotecontent.result"; + const snapshot = Services.telemetry.snapshotEvents( + Ci.nsITelemetry.DATASET_ALL_CHANNELS, + true + ); + const parentEvents = snapshot.parent || []; + return ( + parentEvents + // Transform raw event data to objects. 
+ .map(([i, category, method, object, value, extras]) => { + return { category, method, object, value, extras }; + }) + // Keep only for the specified component and source. + .filter( + e => + e.category == TELEMETRY_CATEGORY_ID && + e.object == component && + e.extras.source == source + ) + // Return total number of events received by status, to mimic histograms snapshots. + .reduce((acc, e) => { + acc[e.value] = (acc[e.value] || 0) + 1; + return acc; + }, {}) + ); +} + +function checkUptakeTelemetry(snapshot1, snapshot2, expectedIncrements) { + const { UptakeTelemetry } = ChromeUtils.importESModule( + "resource://services-common/uptake-telemetry.sys.mjs" + ); + const STATUSES = Object.values(UptakeTelemetry.STATUS); + for (const status of STATUSES) { + const expected = expectedIncrements[status] || 0; + const previous = snapshot1[status] || 0; + const current = snapshot2[status] || previous; + Assert.equal(expected, current - previous, `check events for ${status}`); + } +} + +async function withFakeChannel(channel, f) { + const { Policy } = ChromeUtils.importESModule( + "resource://services-common/uptake-telemetry.sys.mjs" + ); + let oldGetChannel = Policy.getChannel; + Policy.getChannel = () => channel; + try { + return await f(); + } finally { + Policy.getChannel = oldGetChannel; + } +} + +function arrayEqual(a, b) { + return JSON.stringify(a) == JSON.stringify(b); +} diff --git a/services/common/tests/unit/head_http.js b/services/common/tests/unit/head_http.js new file mode 100644 index 0000000000..6c2a0a3f0f --- /dev/null +++ b/services/common/tests/unit/head_http.js @@ -0,0 +1,33 @@ +/* Any copyright is dedicated to the Public Domain. 
+ http://creativecommons.org/publicdomain/zero/1.0/ */ + +/* import-globals-from head_global.js */ + +var { CommonUtils } = ChromeUtils.importESModule( + "resource://services-common/utils.sys.mjs" +); + +function basic_auth_header(user, password) { + return "Basic " + btoa(user + ":" + CommonUtils.encodeUTF8(password)); +} + +function basic_auth_matches(req, user, password) { + if (!req.hasHeader("Authorization")) { + return false; + } + + let expected = basic_auth_header(user, CommonUtils.encodeUTF8(password)); + return req.getHeader("Authorization") == expected; +} + +function httpd_basic_auth_handler(body, metadata, response) { + if (basic_auth_matches(metadata, "guest", "guest")) { + response.setStatusLine(metadata.httpVersion, 200, "OK, authorized"); + response.setHeader("WWW-Authenticate", 'Basic realm="secret"', false); + } else { + body = "This path exists and is protected - failed"; + response.setStatusLine(metadata.httpVersion, 401, "Unauthorized"); + response.setHeader("WWW-Authenticate", 'Basic realm="secret"', false); + } + response.bodyOutputStream.write(body, body.length); +} diff --git a/services/common/tests/unit/moz.build b/services/common/tests/unit/moz.build new file mode 100644 index 0000000000..568f361a54 --- /dev/null +++ b/services/common/tests/unit/moz.build @@ -0,0 +1,5 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. diff --git a/services/common/tests/unit/test_async_chain.js b/services/common/tests/unit/test_async_chain.js new file mode 100644 index 0000000000..3277c92f81 --- /dev/null +++ b/services/common/tests/unit/test_async_chain.js @@ -0,0 +1,40 @@ +/* Any copyright is dedicated to the Public Domain. 
+ * http://creativecommons.org/publicdomain/zero/1.0/ */ + +const { Async } = ChromeUtils.importESModule( + "resource://services-common/async.sys.mjs" +); + +function run_test() { + _("Chain a few async methods, making sure the 'this' object is correct."); + + let methods = { + save(x, callback) { + this.x = x; + callback(x); + }, + addX(x, callback) { + callback(x + this.x); + }, + double(x, callback) { + callback(x * 2); + }, + neg(x, callback) { + callback(-x); + }, + }; + methods.chain = Async.chain; + + // ((1 + 1 + 1) * (-1) + 1) * 2 + 1 = -3 + methods.chain( + methods.save, + methods.addX, + methods.addX, + methods.neg, + methods.addX, + methods.double, + methods.addX, + methods.save + )(1); + Assert.equal(methods.x, -3); +} diff --git a/services/common/tests/unit/test_async_foreach.js b/services/common/tests/unit/test_async_foreach.js new file mode 100644 index 0000000000..14a23e7ff7 --- /dev/null +++ b/services/common/tests/unit/test_async_foreach.js @@ -0,0 +1,88 @@ +/* Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/publicdomain/zero/1.0/ */ + +const { Async } = ChromeUtils.importESModule( + "resource://services-common/async.sys.mjs" +); +const { sinon } = ChromeUtils.importESModule( + "resource://testing-common/Sinon.sys.mjs" +); + +function makeArray(length) { + // Start at 1 so that we can just divide by yieldEvery to get the expected + // call count. (we exp) + return Array.from({ length }, (v, i) => i + 1); +} + +// Adjust if we ever change the default. +const DEFAULT_YIELD_EVERY = 50; + +add_task(async function testYields() { + let spy = sinon.spy(Async, "promiseYield"); + try { + await Async.yieldingForEach(makeArray(DEFAULT_YIELD_EVERY * 2), element => { + // The yield will happen *after* this function is ran. 
+ Assert.equal( + spy.callCount, + Math.floor((element - 1) / DEFAULT_YIELD_EVERY) + ); + }); + } finally { + spy.restore(); + } +}); + +add_task(async function testExistingYieldState() { + const yieldState = Async.yieldState(DEFAULT_YIELD_EVERY); + + for (let i = 0; i < 15; i++) { + Assert.equal(yieldState.shouldYield(), false); + } + + let spy = sinon.spy(Async, "promiseYield"); + + try { + await Async.yieldingForEach( + makeArray(DEFAULT_YIELD_EVERY * 2), + element => { + Assert.equal( + spy.callCount, + Math.floor((element + 15 - 1) / DEFAULT_YIELD_EVERY) + ); + }, + yieldState + ); + } finally { + spy.restore(); + } +}); + +add_task(async function testEarlyReturn() { + let lastElement = 0; + await Async.yieldingForEach(makeArray(DEFAULT_YIELD_EVERY), element => { + lastElement = element; + return element === 10; + }); + + Assert.equal(lastElement, 10); +}); + +add_task(async function testEaryReturnAsync() { + let lastElement = 0; + await Async.yieldingForEach(makeArray(DEFAULT_YIELD_EVERY), async element => { + lastElement = element; + return element === 10; + }); + + Assert.equal(lastElement, 10); +}); + +add_task(async function testEarlyReturnPromise() { + let lastElement = 0; + await Async.yieldingForEach(makeArray(DEFAULT_YIELD_EVERY), element => { + lastElement = element; + return new Promise(resolve => resolve(element === 10)); + }); + + Assert.equal(lastElement, 10); +}); diff --git a/services/common/tests/unit/test_hawkclient.js b/services/common/tests/unit/test_hawkclient.js new file mode 100644 index 0000000000..33d125f42e --- /dev/null +++ b/services/common/tests/unit/test_hawkclient.js @@ -0,0 +1,515 @@ +/* Any copyright is dedicated to the Public Domain. 
+ http://creativecommons.org/publicdomain/zero/1.0/ */ + +"use strict"; + +const { HawkClient } = ChromeUtils.importESModule( + "resource://services-common/hawkclient.sys.mjs" +); + +const SECOND_MS = 1000; +const MINUTE_MS = SECOND_MS * 60; +const HOUR_MS = MINUTE_MS * 60; + +const TEST_CREDS = { + id: "eyJleHBpcmVzIjogMTM2NTAxMDg5OC4x", + key: "qTZf4ZFpAMpMoeSsX3zVRjiqmNs=", + algorithm: "sha256", +}; + +initTestLogging("Trace"); + +add_task(function test_now() { + let client = new HawkClient("https://example.com"); + + Assert.ok(client.now() - Date.now() < SECOND_MS); +}); + +add_task(function test_updateClockOffset() { + let client = new HawkClient("https://example.com"); + + let now = new Date(); + let serverDate = now.toUTCString(); + + // Client's clock is off + client.now = () => { + return now.valueOf() + HOUR_MS; + }; + + client._updateClockOffset(serverDate); + + // Check that they're close; there will likely be a one-second rounding + // error, so checking strict equality will likely fail. + // + // localtimeOffsetMsec is how many milliseconds to add to the local clock so + // that it agrees with the server. We are one hour ahead of the server, so + // our offset should be -1 hour. 
+ Assert.ok(Math.abs(client.localtimeOffsetMsec + HOUR_MS) <= SECOND_MS); +}); + +add_task(async function test_authenticated_get_request() { + let message = '{"msg": "Great Success!"}'; + let method = "GET"; + + let server = httpd_setup({ + "/foo": (request, response) => { + Assert.ok(request.hasHeader("Authorization")); + + response.setStatusLine(request.httpVersion, 200, "OK"); + response.bodyOutputStream.write(message, message.length); + }, + }); + + let client = new HawkClient(server.baseURI); + + let response = await client.request("/foo", method, TEST_CREDS); + let result = JSON.parse(response.body); + + Assert.equal("Great Success!", result.msg); + + await promiseStopServer(server); +}); + +async function check_authenticated_request(method) { + let server = httpd_setup({ + "/foo": (request, response) => { + Assert.ok(request.hasHeader("Authorization")); + + response.setStatusLine(request.httpVersion, 200, "OK"); + response.setHeader("Content-Type", "application/json"); + response.bodyOutputStream.writeFrom( + request.bodyInputStream, + request.bodyInputStream.available() + ); + }, + }); + + let client = new HawkClient(server.baseURI); + + let response = await client.request("/foo", method, TEST_CREDS, { + foo: "bar", + }); + let result = JSON.parse(response.body); + + Assert.equal("bar", result.foo); + + await promiseStopServer(server); +} + +add_task(async function test_authenticated_post_request() { + await check_authenticated_request("POST"); +}); + +add_task(async function test_authenticated_put_request() { + await check_authenticated_request("PUT"); +}); + +add_task(async function test_authenticated_patch_request() { + await check_authenticated_request("PATCH"); +}); + +add_task(async function test_extra_headers() { + let server = httpd_setup({ + "/foo": (request, response) => { + Assert.ok(request.hasHeader("Authorization")); + Assert.ok(request.hasHeader("myHeader")); + Assert.equal(request.getHeader("myHeader"), "fake"); + + 
response.setStatusLine(request.httpVersion, 200, "OK"); + response.setHeader("Content-Type", "application/json"); + response.bodyOutputStream.writeFrom( + request.bodyInputStream, + request.bodyInputStream.available() + ); + }, + }); + + let client = new HawkClient(server.baseURI); + + let response = await client.request( + "/foo", + "POST", + TEST_CREDS, + { foo: "bar" }, + { myHeader: "fake" } + ); + let result = JSON.parse(response.body); + + Assert.equal("bar", result.foo); + + await promiseStopServer(server); +}); + +add_task(async function test_credentials_optional() { + let method = "GET"; + let server = httpd_setup({ + "/foo": (request, response) => { + Assert.ok(!request.hasHeader("Authorization")); + + let message = JSON.stringify({ msg: "you're in the friend zone" }); + response.setStatusLine(request.httpVersion, 200, "OK"); + response.setHeader("Content-Type", "application/json"); + response.bodyOutputStream.write(message, message.length); + }, + }); + + let client = new HawkClient(server.baseURI); + let result = await client.request("/foo", method); // credentials undefined + Assert.equal(JSON.parse(result.body).msg, "you're in the friend zone"); + + await promiseStopServer(server); +}); + +add_task(async function test_server_error() { + let message = "Ohai!"; + let method = "GET"; + + let server = httpd_setup({ + "/foo": (request, response) => { + response.setStatusLine(request.httpVersion, 418, "I am a Teapot"); + response.bodyOutputStream.write(message, message.length); + }, + }); + + let client = new HawkClient(server.baseURI); + + try { + await client.request("/foo", method, TEST_CREDS); + do_throw("Expected an error"); + } catch (err) { + Assert.equal(418, err.code); + Assert.equal("I am a Teapot", err.message); + } + + await promiseStopServer(server); +}); + +add_task(async function test_server_error_json() { + let message = JSON.stringify({ error: "Cannot get ye flask." 
}); + let method = "GET"; + + let server = httpd_setup({ + "/foo": (request, response) => { + response.setStatusLine( + request.httpVersion, + 400, + "What wouldst thou deau?" + ); + response.bodyOutputStream.write(message, message.length); + }, + }); + + let client = new HawkClient(server.baseURI); + + try { + await client.request("/foo", method, TEST_CREDS); + do_throw("Expected an error"); + } catch (err) { + Assert.equal("Cannot get ye flask.", err.error); + } + + await promiseStopServer(server); +}); + +add_task(async function test_offset_after_request() { + let message = "Ohai!"; + let method = "GET"; + + let server = httpd_setup({ + "/foo": (request, response) => { + response.setStatusLine(request.httpVersion, 200, "OK"); + response.bodyOutputStream.write(message, message.length); + }, + }); + + let client = new HawkClient(server.baseURI); + let now = Date.now(); + client.now = () => { + return now + HOUR_MS; + }; + + Assert.equal(client.localtimeOffsetMsec, 0); + + await client.request("/foo", method, TEST_CREDS); + // Should be about an hour off + Assert.ok(Math.abs(client.localtimeOffsetMsec + HOUR_MS) < SECOND_MS); + + await promiseStopServer(server); +}); + +add_task(async function test_offset_in_hawk_header() { + let message = "Ohai!"; + let method = "GET"; + + let server = httpd_setup({ + "/first": function (request, response) { + response.setStatusLine(request.httpVersion, 200, "OK"); + response.bodyOutputStream.write(message, message.length); + }, + + "/second": function (request, response) { + // We see a better date now in the ts component of the header + let delta = getTimestampDelta(request.getHeader("Authorization")); + + // We're now within HAWK's one-minute window. + // I hope this isn't a recipe for intermittent oranges ... 
+ if (delta < MINUTE_MS) { + response.setStatusLine(request.httpVersion, 200, "OK"); + } else { + response.setStatusLine(request.httpVersion, 400, "Delta: " + delta); + } + response.bodyOutputStream.write(message, message.length); + }, + }); + + let client = new HawkClient(server.baseURI); + + client.now = () => { + return Date.now() + 12 * HOUR_MS; + }; + + // We begin with no offset + Assert.equal(client.localtimeOffsetMsec, 0); + await client.request("/first", method, TEST_CREDS); + + // After the first server response, our offset is updated to -12 hours. + // We should be safely in the window, now. + Assert.ok(Math.abs(client.localtimeOffsetMsec + 12 * HOUR_MS) < MINUTE_MS); + await client.request("/second", method, TEST_CREDS); + + await promiseStopServer(server); +}); + +add_task(async function test_2xx_success() { + // Just to ensure that we're not biased toward 200 OK for success + let credentials = { + id: "eyJleHBpcmVzIjogMTM2NTAxMDg5OC4x", + key: "qTZf4ZFpAMpMoeSsX3zVRjiqmNs=", + algorithm: "sha256", + }; + let method = "GET"; + + let server = httpd_setup({ + "/foo": (request, response) => { + response.setStatusLine(request.httpVersion, 202, "Accepted"); + }, + }); + + let client = new HawkClient(server.baseURI); + + let response = await client.request("/foo", method, credentials); + + // Shouldn't be any content in a 202 + Assert.equal(response.body, ""); + + await promiseStopServer(server); +}); + +add_task(async function test_retry_request_on_fail() { + let attempts = 0; + let credentials = { + id: "eyJleHBpcmVzIjogMTM2NTAxMDg5OC4x", + key: "qTZf4ZFpAMpMoeSsX3zVRjiqmNs=", + algorithm: "sha256", + }; + let method = "GET"; + + let server = httpd_setup({ + "/maybe": function (request, response) { + // This path should be hit exactly twice; once with a bad timestamp, and + // again when the client retries the request with a corrected timestamp. 
+ attempts += 1; + Assert.ok(attempts <= 2); + + let delta = getTimestampDelta(request.getHeader("Authorization")); + + // First time through, we should have a bad timestamp + if (attempts === 1) { + Assert.ok(delta > MINUTE_MS); + let message = "never!!!"; + response.setStatusLine(request.httpVersion, 401, "Unauthorized"); + response.bodyOutputStream.write(message, message.length); + return; + } + + // Second time through, timestamp should be corrected by client + Assert.ok(delta < MINUTE_MS); + let message = "i love you!!!"; + response.setStatusLine(request.httpVersion, 200, "OK"); + response.bodyOutputStream.write(message, message.length); + }, + }); + + let client = new HawkClient(server.baseURI); + + client.now = () => { + return Date.now() + 12 * HOUR_MS; + }; + + // We begin with no offset + Assert.equal(client.localtimeOffsetMsec, 0); + + // Request will have bad timestamp; client will retry once + let response = await client.request("/maybe", method, credentials); + Assert.equal(response.body, "i love you!!!"); + + await promiseStopServer(server); +}); + +add_task(async function test_multiple_401_retry_once() { + // Like test_retry_request_on_fail, but always return a 401 + // and ensure that the client only retries once. + let attempts = 0; + let credentials = { + id: "eyJleHBpcmVzIjogMTM2NTAxMDg5OC4x", + key: "qTZf4ZFpAMpMoeSsX3zVRjiqmNs=", + algorithm: "sha256", + }; + let method = "GET"; + + let server = httpd_setup({ + "/maybe": function (request, response) { + // This path should be hit exactly twice; once with a bad timestamp, and + // again when the client retries the request with a corrected timestamp. 
+ attempts += 1; + + Assert.ok(attempts <= 2); + + let message = "never!!!"; + response.setStatusLine(request.httpVersion, 401, "Unauthorized"); + response.bodyOutputStream.write(message, message.length); + }, + }); + + let client = new HawkClient(server.baseURI); + + client.now = () => { + return Date.now() - 12 * HOUR_MS; + }; + + // We begin with no offset + Assert.equal(client.localtimeOffsetMsec, 0); + + // Request will have bad timestamp; client will retry once + try { + await client.request("/maybe", method, credentials); + do_throw("Expected an error"); + } catch (err) { + Assert.equal(err.code, 401); + } + Assert.equal(attempts, 2); + + await promiseStopServer(server); +}); + +add_task(async function test_500_no_retry() { + // If we get a 500 error, the client should not retry (as it would with a + // 401) + let credentials = { + id: "eyJleHBpcmVzIjogMTM2NTAxMDg5OC4x", + key: "qTZf4ZFpAMpMoeSsX3zVRjiqmNs=", + algorithm: "sha256", + }; + let method = "GET"; + + let server = httpd_setup({ + "/no-shutup": function (request, response) { + let message = "Cannot get ye flask."; + response.setStatusLine(request.httpVersion, 500, "Internal server error"); + response.bodyOutputStream.write(message, message.length); + }, + }); + + let client = new HawkClient(server.baseURI); + + // Throw off the clock so the HawkClient would want to retry the request if + // it could + client.now = () => { + return Date.now() - 12 * HOUR_MS; + }; + + // Request will 500; no retries + try { + await client.request("/no-shutup", method, credentials); + do_throw("Expected an error"); + } catch (err) { + Assert.equal(err.code, 500); + } + + await promiseStopServer(server); +}); + +add_task(async function test_401_then_500() { + // Like test_multiple_401_retry_once, but return a 500 to the + // second request, ensuring that the promise is properly rejected + // in client.request. 
+ let attempts = 0; + let credentials = { + id: "eyJleHBpcmVzIjogMTM2NTAxMDg5OC4x", + key: "qTZf4ZFpAMpMoeSsX3zVRjiqmNs=", + algorithm: "sha256", + }; + let method = "GET"; + + let server = httpd_setup({ + "/maybe": function (request, response) { + // This path should be hit exactly twice; once with a bad timestamp, and + // again when the client retries the request with a corrected timestamp. + attempts += 1; + Assert.ok(attempts <= 2); + + let delta = getTimestampDelta(request.getHeader("Authorization")); + + // First time through, we should have a bad timestamp + // Client will retry + if (attempts === 1) { + Assert.ok(delta > MINUTE_MS); + let message = "never!!!"; + response.setStatusLine(request.httpVersion, 401, "Unauthorized"); + response.bodyOutputStream.write(message, message.length); + return; + } + + // Second time through, timestamp should be corrected by client + // And fail on the client + Assert.ok(delta < MINUTE_MS); + let message = "Cannot get ye flask."; + response.setStatusLine(request.httpVersion, 500, "Internal server error"); + response.bodyOutputStream.write(message, message.length); + }, + }); + + let client = new HawkClient(server.baseURI); + + client.now = () => { + return Date.now() - 12 * HOUR_MS; + }; + + // We begin with no offset + Assert.equal(client.localtimeOffsetMsec, 0); + + // Request will have bad timestamp; client will retry once + try { + await client.request("/maybe", method, credentials); + } catch (err) { + Assert.equal(err.code, 500); + } + Assert.equal(attempts, 2); + + await promiseStopServer(server); +}); + +// End of tests. 
+// Utility functions follow + +function getTimestampDelta(authHeader, now = Date.now()) { + let tsMS = new Date( + parseInt(/ts="(\d+)"/.exec(authHeader)[1], 10) * SECOND_MS + ); + return Math.abs(tsMS - now); +} + +function run_test() { + initTestLogging("Trace"); + run_next_test(); +} diff --git a/services/common/tests/unit/test_hawkrequest.js b/services/common/tests/unit/test_hawkrequest.js new file mode 100644 index 0000000000..13ab6737de --- /dev/null +++ b/services/common/tests/unit/test_hawkrequest.js @@ -0,0 +1,231 @@ +/* Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ */ + +"use strict"; + +const { HAWKAuthenticatedRESTRequest, deriveHawkCredentials } = + ChromeUtils.importESModule("resource://services-common/hawkrequest.sys.mjs"); +const { Async } = ChromeUtils.importESModule( + "resource://services-common/async.sys.mjs" +); + +// https://github.com/mozilla/fxa-auth-server/wiki/onepw-protocol#wiki-use-session-certificatesign-etc +var SESSION_KEYS = { + sessionToken: h( + // eslint-disable-next-line no-useless-concat + "a0a1a2a3a4a5a6a7 a8a9aaabacadaeaf" + "b0b1b2b3b4b5b6b7 b8b9babbbcbdbebf" + ), + + tokenID: h( + // eslint-disable-next-line no-useless-concat + "c0a29dcf46174973 da1378696e4c82ae" + "10f723cf4f4d9f75 e39f4ae3851595ab" + ), + + reqHMACkey: h( + // eslint-disable-next-line no-useless-concat + "9d8f22998ee7f579 8b887042466b72d5" + "3e56ab0c094388bf 65831f702d2febc0" + ), +}; + +function do_register_cleanup() { + Services.prefs.clearUserPref("intl.accept_languages"); + Services.prefs.clearUserPref("services.common.log.logger.rest.request"); + + // remove the pref change listener + let hawk = new HAWKAuthenticatedRESTRequest("https://example.com"); + hawk._intl.uninit(); +} + +function run_test() { + registerCleanupFunction(do_register_cleanup); + + Services.prefs.setStringPref( + "services.common.log.logger.rest.request", + "Trace" + ); + initTestLogging("Trace"); + + run_next_test(); +} + 
+add_test(function test_intl_accept_language() { + let testCount = 0; + let languages = [ + "zu-NP;vo", // Nepalese dialect of Zulu, defaulting to Volapük + "fa-CG;ik", // Congolese dialect of Farsei, defaulting to Inupiaq + ]; + + function setLanguagePref(lang) { + Services.prefs.setStringPref("intl.accept_languages", lang); + } + + let hawk = new HAWKAuthenticatedRESTRequest("https://example.com"); + + Services.prefs.addObserver("intl.accept_languages", checkLanguagePref); + setLanguagePref(languages[testCount]); + + function checkLanguagePref() { + CommonUtils.nextTick(function () { + // Ensure we're only called for the number of entries in languages[]. + Assert.ok(testCount < languages.length); + + Assert.equal(hawk._intl.accept_languages, languages[testCount]); + + testCount++; + if (testCount < languages.length) { + // Set next language in prefs; Pref service will call checkNextLanguage. + setLanguagePref(languages[testCount]); + return; + } + + // We've checked all the entries in languages[]. Cleanup and move on. + info( + "Checked " + + testCount + + " languages. Removing checkLanguagePref as pref observer." + ); + Services.prefs.removeObserver("intl.accept_languages", checkLanguagePref); + run_next_test(); + }); + } +}); + +add_task(async function test_hawk_authenticated_request() { + let postData = { your: "data" }; + + // An arbitrary date - Feb 2, 1971. It ends in a bunch of zeroes to make our + // computation with the hawk timestamp easier, since hawk throws away the + // millisecond values. + let then = 34329600000; + + let clockSkew = 120000; + let timeOffset = -1 * clockSkew; + let localTime = then + clockSkew; + + // Set the accept-languages pref to the Nepalese dialect of Zulu. 
+ let acceptLanguage = "zu-NP"; // omit trailing ';', which our HTTP libs snip + Services.prefs.setStringPref("intl.accept_languages", acceptLanguage); + + let credentials = { + id: "eyJleHBpcmVzIjogMTM2NTAxMDg5OC4x", + key: "qTZf4ZFpAMpMoeSsX3zVRjiqmNs=", + algorithm: "sha256", + }; + + let server = httpd_setup({ + "/elysium": function (request, response) { + Assert.ok(request.hasHeader("Authorization")); + + // check that the header timestamp is our arbitrary system date, not + // today's date. Note that hawk header timestamps are in seconds, not + // milliseconds. + let authorization = request.getHeader("Authorization"); + let tsMS = parseInt(/ts="(\d+)"/.exec(authorization)[1], 10) * 1000; + Assert.equal(tsMS, then); + + // This testing can be a little wonky. In an environment where + // pref("intl.accept_languages") === 'en-US, en' + // the header is sent as: + // 'en-US,en;q=0.5' + // hence our fake value for acceptLanguage. + let lang = request.getHeader("Accept-Language"); + Assert.equal(lang, acceptLanguage); + + let message = "yay"; + response.setStatusLine(request.httpVersion, 200, "OK"); + response.bodyOutputStream.write(message, message.length); + }, + }); + + let url = server.baseURI + "/elysium"; + let extra = { + now: localTime, + localtimeOffsetMsec: timeOffset, + }; + + let request = new HAWKAuthenticatedRESTRequest(url, credentials, extra); + + // Allow hawk._intl to respond to the language pref change + await Async.promiseYield(); + + await request.post(postData); + Assert.equal(200, request.response.status); + Assert.equal(request.response.body, "yay"); + + Services.prefs.clearUserPref("intl.accept_languages"); + let pref = Services.prefs.getComplexValue( + "intl.accept_languages", + Ci.nsIPrefLocalizedString + ); + Assert.notEqual(acceptLanguage, pref.data); + + await promiseStopServer(server); +}); + +add_task(async function test_hawk_language_pref_changed() { + let languages = [ + "zu-NP", // Nepalese dialect of Zulu + "fa-CG", // Congolese 
dialect of Farsi + ]; + + let credentials = { + id: "eyJleHBpcmVzIjogMTM2NTAxMDg5OC4x", + key: "qTZf4ZFpAMpMoeSsX3zVRjiqmNs=", + algorithm: "sha256", + }; + + function setLanguage(lang) { + Services.prefs.setStringPref("intl.accept_languages", lang); + } + + let server = httpd_setup({ + "/foo": function (request, response) { + Assert.equal(languages[1], request.getHeader("Accept-Language")); + + response.setStatusLine(request.httpVersion, 200, "OK"); + }, + }); + + let url = server.baseURI + "/foo"; + let request; + + setLanguage(languages[0]); + + // A new request should create the stateful object for tracking the current + // language. + request = new HAWKAuthenticatedRESTRequest(url, credentials); + + // Wait for change to propagate + await Async.promiseYield(); + Assert.equal(languages[0], request._intl.accept_languages); + + // Change the language pref ... + setLanguage(languages[1]); + + await Async.promiseYield(); + + request = new HAWKAuthenticatedRESTRequest(url, credentials); + let response = await request.post({}); + + Assert.equal(200, response.status); + Services.prefs.clearUserPref("intl.accept_languages"); + + await promiseStopServer(server); +}); + +add_task(async function test_deriveHawkCredentials() { + let credentials = await deriveHawkCredentials( + SESSION_KEYS.sessionToken, + "sessionToken" + ); + Assert.equal(credentials.id, SESSION_KEYS.tokenID); + Assert.equal( + CommonUtils.bytesAsHex(credentials.key), + SESSION_KEYS.reqHMACkey + ); +}); + +// turn formatted test vectors into normal hex strings +function h(hexStr) { + return hexStr.replace(/\s+/g, ""); +} diff --git a/services/common/tests/unit/test_kinto.js b/services/common/tests/unit/test_kinto.js new file mode 100644 index 0000000000..4b5e8471b1 --- /dev/null +++ b/services/common/tests/unit/test_kinto.js @@ -0,0 +1,512 @@ +/* Any copyright is dedicated to the Public Domain. 
+ http://creativecommons.org/publicdomain/zero/1.0/ */ + +const { Kinto } = ChromeUtils.import( + "resource://services-common/kinto-offline-client.js" +); +const { FirefoxAdapter } = ChromeUtils.importESModule( + "resource://services-common/kinto-storage-adapter.sys.mjs" +); + +var server; + +// set up what we need to make storage adapters +const kintoFilename = "kinto.sqlite"; + +function do_get_kinto_sqliteHandle() { + return FirefoxAdapter.openConnection({ path: kintoFilename }); +} + +function do_get_kinto_collection(sqliteHandle, collection = "test_collection") { + let config = { + remote: `http://localhost:${server.identity.primaryPort}/v1/`, + headers: { Authorization: "Basic " + btoa("user:pass") }, + adapter: FirefoxAdapter, + adapterOptions: { sqliteHandle }, + }; + return new Kinto(config).collection(collection); +} + +async function clear_collection() { + let sqliteHandle; + try { + sqliteHandle = await do_get_kinto_sqliteHandle(); + const collection = do_get_kinto_collection(sqliteHandle); + await collection.clear(); + } finally { + await sqliteHandle.close(); + } +} + +// test some operations on a local collection +add_task(async function test_kinto_add_get() { + let sqliteHandle; + try { + sqliteHandle = await do_get_kinto_sqliteHandle(); + const collection = do_get_kinto_collection(sqliteHandle); + + let newRecord = { foo: "bar" }; + // check a record is created + let createResult = await collection.create(newRecord); + Assert.equal(createResult.data.foo, newRecord.foo); + // check getting the record gets the same info + let getResult = await collection.get(createResult.data.id); + deepEqual(createResult.data, getResult.data); + // check what happens if we create the same item again (it should throw + // since you can't create with id) + try { + await collection.create(createResult.data); + do_throw("Creation of a record with an id should fail"); + } catch (err) {} + // try a few creates without waiting for the first few to resolve + let promises = 
[]; + promises.push(collection.create(newRecord)); + promises.push(collection.create(newRecord)); + promises.push(collection.create(newRecord)); + await collection.create(newRecord); + await Promise.all(promises); + } finally { + await sqliteHandle.close(); + } +}); + +add_task(clear_collection); + +// test some operations on multiple connections +add_task(async function test_kinto_add_get() { + let sqliteHandle; + try { + sqliteHandle = await do_get_kinto_sqliteHandle(); + const collection1 = do_get_kinto_collection(sqliteHandle); + const collection2 = do_get_kinto_collection( + sqliteHandle, + "test_collection_2" + ); + + let newRecord = { foo: "bar" }; + + // perform several write operations alternately without waiting for promises + // to resolve + let promises = []; + for (let i = 0; i < 10; i++) { + promises.push(collection1.create(newRecord)); + promises.push(collection2.create(newRecord)); + } + + // ensure subsequent operations still work + await Promise.all([ + collection1.create(newRecord), + collection2.create(newRecord), + ]); + await Promise.all(promises); + } finally { + await sqliteHandle.close(); + } +}); + +add_task(clear_collection); + +add_task(async function test_kinto_update() { + let sqliteHandle; + try { + sqliteHandle = await do_get_kinto_sqliteHandle(); + const collection = do_get_kinto_collection(sqliteHandle); + const newRecord = { foo: "bar" }; + // check a record is created + let createResult = await collection.create(newRecord); + Assert.equal(createResult.data.foo, newRecord.foo); + Assert.equal(createResult.data._status, "created"); + // check we can update this OK + let copiedRecord = Object.assign(createResult.data, {}); + deepEqual(createResult.data, copiedRecord); + copiedRecord.foo = "wibble"; + let updateResult = await collection.update(copiedRecord); + // check the field was updated + Assert.equal(updateResult.data.foo, copiedRecord.foo); + // check the status is still "created", since we haven't synced + // the record + 
Assert.equal(updateResult.data._status, "created"); + } finally { + await sqliteHandle.close(); + } +}); + +add_task(clear_collection); + +add_task(async function test_kinto_clear() { + let sqliteHandle; + try { + sqliteHandle = await do_get_kinto_sqliteHandle(); + const collection = do_get_kinto_collection(sqliteHandle); + + // create an expected number of records + const expected = 10; + const newRecord = { foo: "bar" }; + for (let i = 0; i < expected; i++) { + await collection.create(newRecord); + } + // check the collection contains the correct number + let list = await collection.list(); + Assert.equal(list.data.length, expected); + // clear the collection and check again - should be 0 + await collection.clear(); + list = await collection.list(); + Assert.equal(list.data.length, 0); + } finally { + await sqliteHandle.close(); + } +}); + +add_task(clear_collection); + +add_task(async function test_kinto_delete() { + let sqliteHandle; + try { + sqliteHandle = await do_get_kinto_sqliteHandle(); + const collection = do_get_kinto_collection(sqliteHandle); + const newRecord = { foo: "bar" }; + // check a record is created + let createResult = await collection.create(newRecord); + Assert.equal(createResult.data.foo, newRecord.foo); + // check getting the record gets the same info + let getResult = await collection.get(createResult.data.id); + deepEqual(createResult.data, getResult.data); + // delete that record + let deleteResult = await collection.delete(createResult.data.id); + // check the ID is set on the result + Assert.equal(getResult.data.id, deleteResult.data.id); + // and check that get no longer returns the record + try { + getResult = await collection.get(createResult.data.id); + do_throw("there should not be a result"); + } catch (e) {} + } finally { + await sqliteHandle.close(); + } +}); + +add_task(async function test_kinto_list() { + let sqliteHandle; + try { + sqliteHandle = await do_get_kinto_sqliteHandle(); + const collection = 
do_get_kinto_collection(sqliteHandle); + const expected = 10; + const created = []; + for (let i = 0; i < expected; i++) { + let newRecord = { foo: "test " + i }; + let createResult = await collection.create(newRecord); + created.push(createResult.data); + } + // check the collection contains the correct number + let list = await collection.list(); + Assert.equal(list.data.length, expected); + + // check that all created records exist in the retrieved list + for (let createdRecord of created) { + let found = false; + for (let retrievedRecord of list.data) { + if (createdRecord.id == retrievedRecord.id) { + deepEqual(createdRecord, retrievedRecord); + found = true; + } + } + Assert.ok(found); + } + } finally { + await sqliteHandle.close(); + } +}); + +add_task(clear_collection); + +add_task(async function test_importBulk_ignores_already_imported_records() { + let sqliteHandle; + try { + sqliteHandle = await do_get_kinto_sqliteHandle(); + const collection = do_get_kinto_collection(sqliteHandle); + const record = { + id: "41b71c13-17e9-4ee3-9268-6a41abf9730f", + title: "foo", + last_modified: 1457896541, + }; + await collection.importBulk([record]); + let impactedRecords = await collection.importBulk([record]); + Assert.equal(impactedRecords.length, 0); + } finally { + await sqliteHandle.close(); + } +}); + +add_task(clear_collection); + +add_task(async function test_loadDump_should_overwrite_old_records() { + let sqliteHandle; + try { + sqliteHandle = await do_get_kinto_sqliteHandle(); + const collection = do_get_kinto_collection(sqliteHandle); + const record = { + id: "41b71c13-17e9-4ee3-9268-6a41abf9730f", + title: "foo", + last_modified: 1457896541, + }; + await collection.loadDump([record]); + const updated = Object.assign({}, record, { last_modified: 1457896543 }); + let impactedRecords = await collection.loadDump([updated]); + Assert.equal(impactedRecords.length, 1); + } finally { + await sqliteHandle.close(); + } +}); + +add_task(clear_collection); + 
+add_task(async function test_loadDump_should_not_overwrite_unsynced_records() { + let sqliteHandle; + try { + sqliteHandle = await do_get_kinto_sqliteHandle(); + const collection = do_get_kinto_collection(sqliteHandle); + const recordId = "41b71c13-17e9-4ee3-9268-6a41abf9730f"; + await collection.create( + { id: recordId, title: "foo" }, + { useRecordId: true } + ); + const record = { id: recordId, title: "bar", last_modified: 1457896541 }; + let impactedRecords = await collection.loadDump([record]); + Assert.equal(impactedRecords.length, 0); + } finally { + await sqliteHandle.close(); + } +}); + +add_task(clear_collection); + +add_task( + async function test_loadDump_should_not_overwrite_records_without_last_modified() { + let sqliteHandle; + try { + sqliteHandle = await do_get_kinto_sqliteHandle(); + const collection = do_get_kinto_collection(sqliteHandle); + const recordId = "41b71c13-17e9-4ee3-9268-6a41abf9730f"; + await collection.create({ id: recordId, title: "foo" }, { synced: true }); + const record = { id: recordId, title: "bar", last_modified: 1457896541 }; + let impactedRecords = await collection.loadDump([record]); + Assert.equal(impactedRecords.length, 0); + } finally { + await sqliteHandle.close(); + } + } +); + +add_task(clear_collection); + +// Now do some sanity checks against a server - we're not looking to test +// core kinto.js functionality here (there is excellent test coverage in +// kinto.js), more making sure things are basically working as expected. 
+add_task(async function test_kinto_sync() { + const configPath = "/v1/"; + const metadataPath = "/v1/buckets/default/collections/test_collection"; + const recordsPath = "/v1/buckets/default/collections/test_collection/records"; + // register a handler + function handleResponse(request, response) { + try { + const sampled = getSampleResponse(request, server.identity.primaryPort); + if (!sampled) { + do_throw( + `unexpected ${request.method} request for ${request.path}?${request.queryString}` + ); + } + + response.setStatusLine( + null, + sampled.status.status, + sampled.status.statusText + ); + // send the headers + for (let headerLine of sampled.sampleHeaders) { + let headerElements = headerLine.split(":"); + response.setHeader(headerElements[0], headerElements[1].trimLeft()); + } + response.setHeader("Date", new Date().toUTCString()); + + response.write(sampled.responseBody); + } catch (e) { + dump(`${e}\n`); + } + } + server.registerPathHandler(configPath, handleResponse); + server.registerPathHandler(metadataPath, handleResponse); + server.registerPathHandler(recordsPath, handleResponse); + + // create an empty collection, sync to populate + let sqliteHandle; + try { + let result; + sqliteHandle = await do_get_kinto_sqliteHandle(); + const collection = do_get_kinto_collection(sqliteHandle); + + result = await collection.sync(); + Assert.ok(result.ok); + + // our test data has a single record; it should be in the local collection + let list = await collection.list(); + Assert.equal(list.data.length, 1); + + // now sync again; we should now have 2 records + result = await collection.sync(); + Assert.ok(result.ok); + list = await collection.list(); + Assert.equal(list.data.length, 2); + + // sync again; the second records should have been modified + const before = list.data[0].title; + result = await collection.sync(); + Assert.ok(result.ok); + list = await collection.list(); + const after = list.data[1].title; + Assert.notEqual(before, after); + + const manualID 
= list.data[0].id; + Assert.equal(list.data.length, 3); + Assert.equal(manualID, "some-manually-chosen-id"); + } finally { + await sqliteHandle.close(); + } +}); + +function run_test() { + // Set up an HTTP Server + server = new HttpServer(); + server.start(-1); + + run_next_test(); + + registerCleanupFunction(function () { + server.stop(function () {}); + }); +} + +// get a response for a given request from sample data +function getSampleResponse(req, port) { + const responses = { + OPTIONS: { + sampleHeaders: [ + "Access-Control-Allow-Headers: Content-Length,Expires,Backoff,Retry-After,Last-Modified,Total-Records,ETag,Pragma,Cache-Control,authorization,content-type,if-none-match,Alert,Next-Page", + "Access-Control-Allow-Methods: GET,HEAD,OPTIONS,POST,DELETE,OPTIONS", + "Access-Control-Allow-Origin: *", + "Content-Type: application/json; charset=UTF-8", + "Server: waitress", + ], + status: { status: 200, statusText: "OK" }, + responseBody: "null", + }, + "GET:/v1/?": { + sampleHeaders: [ + "Access-Control-Allow-Origin: *", + "Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff", + "Content-Type: application/json; charset=UTF-8", + "Server: waitress", + ], + status: { status: 200, statusText: "OK" }, + responseBody: JSON.stringify({ + settings: { + batch_max_requests: 25, + }, + url: `http://localhost:${port}/v1/`, + documentation: "https://kinto.readthedocs.org/", + version: "1.5.1", + commit: "cbc6f58", + hello: "kinto", + }), + }, + "GET:/v1/buckets/default/collections/test_collection": { + sampleHeaders: [ + "Access-Control-Allow-Origin: *", + "Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff", + "Content-Type: application/json; charset=UTF-8", + "Server: waitress", + 'Etag: "1234"', + ], + status: { status: 200, statusText: "OK" }, + responseBody: JSON.stringify({ + data: { + id: "test_collection", + last_modified: 1234, + }, + }), + }, + 
"GET:/v1/buckets/default/collections/test_collection/records?_sort=-last_modified": + { + sampleHeaders: [ + "Access-Control-Allow-Origin: *", + "Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff", + "Content-Type: application/json; charset=UTF-8", + "Server: waitress", + 'Etag: "1445606341071"', + ], + status: { status: 200, statusText: "OK" }, + responseBody: JSON.stringify({ + data: [ + { + last_modified: 1445606341071, + done: false, + id: "68db8313-686e-4fff-835e-07d78ad6f2af", + title: "New test", + }, + ], + }), + }, + "GET:/v1/buckets/default/collections/test_collection/records?_sort=-last_modified&_since=1445606341071": + { + sampleHeaders: [ + "Access-Control-Allow-Origin: *", + "Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff", + "Content-Type: application/json; charset=UTF-8", + "Server: waitress", + 'Etag: "1445607941223"', + ], + status: { status: 200, statusText: "OK" }, + responseBody: JSON.stringify({ + data: [ + { + last_modified: 1445607941223, + done: false, + id: "901967b0-f729-4b30-8d8d-499cba7f4b1d", + title: "Another new test", + }, + ], + }), + }, + "GET:/v1/buckets/default/collections/test_collection/records?_sort=-last_modified&_since=1445607941223": + { + sampleHeaders: [ + "Access-Control-Allow-Origin: *", + "Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff", + "Content-Type: application/json; charset=UTF-8", + "Server: waitress", + 'Etag: "1445607541267"', + ], + status: { status: 200, statusText: "OK" }, + responseBody: JSON.stringify({ + data: [ + { + last_modified: 1445607541265, + done: false, + id: "901967b0-f729-4b30-8d8d-499cba7f4b1d", + title: "Modified title", + }, + { + last_modified: 1445607541267, + done: true, + id: "some-manually-chosen-id", + title: "New record with custom ID", + }, + ], + }), + }, + }; + return ( + responses[`${req.method}:${req.path}?${req.queryString}`] || + responses[`${req.method}:${req.path}`] || + responses[req.method] + 
); +} diff --git a/services/common/tests/unit/test_load_modules.js b/services/common/tests/unit/test_load_modules.js new file mode 100644 index 0000000000..d86165266f --- /dev/null +++ b/services/common/tests/unit/test_load_modules.js @@ -0,0 +1,60 @@ +/* Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/publicdomain/zero/1.0/ */ + +const { AppConstants } = ChromeUtils.importESModule( + "resource://gre/modules/AppConstants.sys.mjs" +); + +const MODULE_BASE = "resource://services-common/"; +const shared_modules = [ + "async.sys.mjs", + "logmanager.sys.mjs", + "rest.sys.mjs", + "utils.sys.mjs", +]; + +const non_android_modules = ["tokenserverclient.sys.mjs"]; + +const TEST_BASE = "resource://testing-common/services/common/"; +const shared_test_modules = ["logging.sys.mjs"]; + +function expectImportsToSucceed(mm, base = MODULE_BASE) { + for (let m of mm) { + let resource = base + m; + let succeeded = false; + try { + ChromeUtils.importESModule(resource); + succeeded = true; + } catch (e) {} + + if (!succeeded) { + throw new Error(`Importing ${resource} should have succeeded!`); + } + } +} + +function expectImportsToFail(mm, base = MODULE_BASE) { + for (let m of mm) { + let resource = base + m; + let succeeded = false; + try { + ChromeUtils.importESModule(resource); + succeeded = true; + } catch (e) {} + + if (succeeded) { + throw new Error(`Importing ${resource} should have failed!`); + } + } +} + +function run_test() { + expectImportsToSucceed(shared_modules); + expectImportsToSucceed(shared_test_modules, TEST_BASE); + + if (AppConstants.platform != "android") { + expectImportsToSucceed(non_android_modules); + } else { + expectImportsToFail(non_android_modules); + } +} diff --git a/services/common/tests/unit/test_logmanager.js b/services/common/tests/unit/test_logmanager.js new file mode 100644 index 0000000000..89ac274e61 --- /dev/null +++ b/services/common/tests/unit/test_logmanager.js @@ -0,0 +1,330 @@ +/* Any copyright is dedicated 
to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ */ + +// NOTE: The sync test_errorhandler_* tests have quite good coverage for +// other aspects of this. + +const { LogManager } = ChromeUtils.importESModule( + "resource://services-common/logmanager.sys.mjs" +); +const { FileUtils } = ChromeUtils.importESModule( + "resource://gre/modules/FileUtils.sys.mjs" +); + +// Returns an array of [consoleAppender, dumpAppender, [fileAppenders]] for +// the specified log. Note that fileAppenders will usually have length=1 +function getAppenders(log) { + let capps = log.appenders.filter(app => app instanceof Log.ConsoleAppender); + equal(capps.length, 1, "should only have one console appender"); + let dapps = log.appenders.filter(app => app instanceof Log.DumpAppender); + equal(dapps.length, 1, "should only have one dump appender"); + let fapps = log.appenders.filter( + app => app instanceof LogManager.StorageStreamAppender + ); + return [capps[0], dapps[0], fapps]; +} + +// Test that the correct thing happens when no prefs exist for the log manager. +add_task(async function test_noPrefs() { + // tell the log manager to init with a pref branch that doesn't exist. + let lm = new LogManager("no-such-branch.", ["TestLog"], "test"); + + let log = Log.repository.getLogger("TestLog"); + let [capp, dapp, fapps] = getAppenders(log); + // The console appender gets "Fatal" while the "dump" appender gets "Error" levels + equal(capp.level, Log.Level.Fatal); + equal(dapp.level, Log.Level.Error); + // and the file (stream) appender gets Debug by default + equal(fapps.length, 1, "only 1 file appender"); + equal(fapps[0].level, Log.Level.Debug); + lm.finalize(); +}); + +// Test that changes to the prefs used by the log manager are updated dynamically. 
+add_task(async function test_PrefChanges() { + Services.prefs.setStringPref( + "log-manager.test.log.appender.console", + "Trace" + ); + Services.prefs.setStringPref("log-manager.test.log.appender.dump", "Trace"); + Services.prefs.setStringPref( + "log-manager.test.log.appender.file.level", + "Trace" + ); + let lm = new LogManager("log-manager.test.", ["TestLog2"], "test"); + + let log = Log.repository.getLogger("TestLog2"); + let [capp, dapp, [fapp]] = getAppenders(log); + equal(capp.level, Log.Level.Trace); + equal(dapp.level, Log.Level.Trace); + equal(fapp.level, Log.Level.Trace); + // adjust the prefs and they should magically be reflected in the appenders. + Services.prefs.setStringPref( + "log-manager.test.log.appender.console", + "Debug" + ); + Services.prefs.setStringPref("log-manager.test.log.appender.dump", "Debug"); + Services.prefs.setStringPref( + "log-manager.test.log.appender.file.level", + "Debug" + ); + equal(capp.level, Log.Level.Debug); + equal(dapp.level, Log.Level.Debug); + equal(fapp.level, Log.Level.Debug); + // and invalid values should cause them to fallback to their defaults. + Services.prefs.setStringPref("log-manager.test.log.appender.console", "xxx"); + Services.prefs.setStringPref("log-manager.test.log.appender.dump", "xxx"); + Services.prefs.setStringPref( + "log-manager.test.log.appender.file.level", + "xxx" + ); + equal(capp.level, Log.Level.Fatal); + equal(dapp.level, Log.Level.Error); + equal(fapp.level, Log.Level.Debug); + lm.finalize(); +}); + +// Test that the same log used by multiple log managers does the right thing. +add_task(async function test_SharedLogs() { + // create the prefs for the first instance. 
+ Services.prefs.setStringPref( + "log-manager-1.test.log.appender.console", + "Trace" + ); + Services.prefs.setStringPref("log-manager-1.test.log.appender.dump", "Trace"); + Services.prefs.setStringPref( + "log-manager-1.test.log.appender.file.level", + "Trace" + ); + let lm1 = new LogManager("log-manager-1.test.", ["TestLog3"], "test"); + + // and the second. + Services.prefs.setStringPref( + "log-manager-2.test.log.appender.console", + "Debug" + ); + Services.prefs.setStringPref("log-manager-2.test.log.appender.dump", "Debug"); + Services.prefs.setStringPref( + "log-manager-2.test.log.appender.file.level", + "Debug" + ); + let lm2 = new LogManager("log-manager-2.test.", ["TestLog3"], "test"); + + let log = Log.repository.getLogger("TestLog3"); + let [capp, dapp] = getAppenders(log); + + // console and dump appenders should be "trace" as it is more verbose than + // "debug" + equal(capp.level, Log.Level.Trace); + equal(dapp.level, Log.Level.Trace); + + // Set the prefs on the -1 branch to "Error" - it should then end up with + // "Debug" from the -2 branch. + Services.prefs.setStringPref( + "log-manager-1.test.log.appender.console", + "Error" + ); + Services.prefs.setStringPref("log-manager-1.test.log.appender.dump", "Error"); + Services.prefs.setStringPref( + "log-manager-1.test.log.appender.file.level", + "Error" + ); + + equal(capp.level, Log.Level.Debug); + equal(dapp.level, Log.Level.Debug); + + lm1.finalize(); + lm2.finalize(); +}); + +// A little helper to test what log files exist. We expect exactly zero (if +// prefix is null) or exactly one with the specified prefix. +function checkLogFile(prefix) { + let logsdir = FileUtils.getDir("ProfD", ["weave", "logs"]); + let entries = logsdir.directoryEntries; + if (!prefix) { + // expecting no files. + ok(!entries.hasMoreElements()); + } else { + // expecting 1 file. 
+ ok(entries.hasMoreElements()); + let logfile = entries.getNext().QueryInterface(Ci.nsIFile); + equal(logfile.leafName.slice(-4), ".txt"); + ok(logfile.leafName.startsWith(prefix + "-test-"), logfile.leafName); + // and remove it ready for the next check. + logfile.remove(false); + } +} + +// Test that we correctly write error logs by default +add_task(async function test_logFileErrorDefault() { + let lm = new LogManager("log-manager.test.", ["TestLog2"], "test"); + + let log = Log.repository.getLogger("TestLog2"); + log.error("an error message"); + await lm.resetFileLog(lm.REASON_ERROR); + // One error log file exists. + checkLogFile("error"); + + lm.finalize(); +}); + +// Test that we correctly write success logs. +add_task(async function test_logFileSuccess() { + Services.prefs.setBoolPref( + "log-manager.test.log.appender.file.logOnError", + false + ); + Services.prefs.setBoolPref( + "log-manager.test.log.appender.file.logOnSuccess", + false + ); + + let lm = new LogManager("log-manager.test.", ["TestLog2"], "test"); + + let log = Log.repository.getLogger("TestLog2"); + log.info("an info message"); + await lm.resetFileLog(); + // Zero log files exist. + checkLogFile(null); + + // Reset logOnSuccess and do it again - log should appear. + Services.prefs.setBoolPref( + "log-manager.test.log.appender.file.logOnSuccess", + true + ); + log.info("an info message"); + await lm.resetFileLog(); + + checkLogFile("success"); + + // Now test with no "reason" specified and no "error" record. + log.info("an info message"); + await lm.resetFileLog(); + // should get a "success" entry. + checkLogFile("success"); + + // With no "reason" and an error record - should get no success log. + log.error("an error message"); + await lm.resetFileLog(); + // should get no entry + checkLogFile(null); + + // And finally now with no error, to ensure that the fact we had an error + // previously doesn't persist after the .resetFileLog call. 
+ log.info("an info message"); + await lm.resetFileLog(); + checkLogFile("success"); + + lm.finalize(); +}); + +// Test that we correctly write error logs. +add_task(async function test_logFileError() { + Services.prefs.setBoolPref( + "log-manager.test.log.appender.file.logOnError", + false + ); + Services.prefs.setBoolPref( + "log-manager.test.log.appender.file.logOnSuccess", + false + ); + + let lm = new LogManager("log-manager.test.", ["TestLog2"], "test"); + + let log = Log.repository.getLogger("TestLog2"); + log.info("an info message"); + let reason = await lm.resetFileLog(); + Assert.equal(reason, null, "null returned when no file created."); + // Zero log files exist. + checkLogFile(null); + + // Reset logOnSuccess - success logs should appear if no error records. + Services.prefs.setBoolPref( + "log-manager.test.log.appender.file.logOnSuccess", + true + ); + log.info("an info message"); + reason = await lm.resetFileLog(); + Assert.equal(reason, lm.SUCCESS_LOG_WRITTEN); + checkLogFile("success"); + + // Set logOnError and unset logOnSuccess - error logs should appear. + Services.prefs.setBoolPref( + "log-manager.test.log.appender.file.logOnSuccess", + false + ); + Services.prefs.setBoolPref( + "log-manager.test.log.appender.file.logOnError", + true + ); + log.error("an error message"); + reason = await lm.resetFileLog(); + Assert.equal(reason, lm.ERROR_LOG_WRITTEN); + checkLogFile("error"); + + // Now test with no "error" record. + log.info("an info message"); + reason = await lm.resetFileLog(); + // should get no file + Assert.equal(reason, null); + checkLogFile(null); + + // With an error record we should get an error log. + log.error("an error message"); + reason = await lm.resetFileLog(); + // should get en error log + Assert.equal(reason, lm.ERROR_LOG_WRITTEN); + checkLogFile("error"); + + // And finally now with success, to ensure that the fact we had an error + // previously doesn't persist after the .resetFileLog call. 
+ log.info("an info message"); + await lm.resetFileLog(); + checkLogFile(null); + + lm.finalize(); +}); + +function countLogFiles() { + let logsdir = FileUtils.getDir("ProfD", ["weave", "logs"]); + let count = 0; + for (let entry of logsdir.directoryEntries) { + void entry; + count += 1; + } + return count; +} + +// Test that removeAllLogs removes all log files. +add_task(async function test_logFileError() { + Services.prefs.setBoolPref( + "log-manager.test.log.appender.file.logOnError", + true + ); + Services.prefs.setBoolPref( + "log-manager.test.log.appender.file.logOnSuccess", + true + ); + + let lm = new LogManager("log-manager.test.", ["TestLog2"], "test"); + + let log = Log.repository.getLogger("TestLog2"); + log.info("an info message"); + let reason = await lm.resetFileLog(); + Assert.equal(reason, lm.SUCCESS_LOG_WRITTEN, "success log was written."); + + log.error("an error message"); + reason = await lm.resetFileLog(); + Assert.equal(reason, lm.ERROR_LOG_WRITTEN); + + Assert.equal(countLogFiles(), 2, "expect 2 log files"); + await lm.removeAllLogs(); + Assert.equal( + countLogFiles(), + 0, + "should be no log files after removing them" + ); + + lm.finalize(); +}); diff --git a/services/common/tests/unit/test_observers.js b/services/common/tests/unit/test_observers.js new file mode 100644 index 0000000000..b0fce95e0b --- /dev/null +++ b/services/common/tests/unit/test_observers.js @@ -0,0 +1,82 @@ +/* Any copyright is dedicated to the Public Domain. 
+ * http://creativecommons.org/publicdomain/zero/1.0/ */ + +const { Observers } = ChromeUtils.importESModule( + "resource://services-common/observers.sys.mjs" +); + +var gSubject = {}; + +add_test(function test_function_observer() { + let foo = false; + + let onFoo = function (subject, data) { + foo = !foo; + Assert.equal(subject, gSubject); + Assert.equal(data, "some data"); + }; + + Observers.add("foo", onFoo); + Observers.notify("foo", gSubject, "some data"); + + // The observer was notified after being added. + Assert.ok(foo); + + Observers.remove("foo", onFoo); + Observers.notify("foo"); + + // The observer was not notified after being removed. + Assert.ok(foo); + + run_next_test(); +}); + +add_test(function test_method_observer() { + let obj = { + foo: false, + onFoo(subject, data) { + this.foo = !this.foo; + Assert.equal(subject, gSubject); + Assert.equal(data, "some data"); + }, + }; + + // The observer is notified after being added. + Observers.add("foo", obj.onFoo, obj); + Observers.notify("foo", gSubject, "some data"); + Assert.ok(obj.foo); + + // The observer is not notified after being removed. + Observers.remove("foo", obj.onFoo, obj); + Observers.notify("foo"); + Assert.ok(obj.foo); + + run_next_test(); +}); + +add_test(function test_object_observer() { + let obj = { + foo: false, + observe(subject, topic, data) { + this.foo = !this.foo; + + Assert.equal(subject, gSubject); + Assert.equal(topic, "foo"); + Assert.equal(data, "some data"); + }, + }; + + Observers.add("foo", obj); + Observers.notify("foo", gSubject, "some data"); + + // The observer is notified after being added. + Assert.ok(obj.foo); + + Observers.remove("foo", obj); + Observers.notify("foo"); + + // The observer is not notified after being removed. 
+ Assert.ok(obj.foo); + + run_next_test(); +}); diff --git a/services/common/tests/unit/test_restrequest.js b/services/common/tests/unit/test_restrequest.js new file mode 100644 index 0000000000..9ae5e9429a --- /dev/null +++ b/services/common/tests/unit/test_restrequest.js @@ -0,0 +1,860 @@ +/* Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ */ + +"use strict"; + +const { RESTRequest } = ChromeUtils.importESModule( + "resource://services-common/rest.sys.mjs" +); + +function run_test() { + Log.repository.getLogger("Services.Common.RESTRequest").level = + Log.Level.Trace; + initTestLogging("Trace"); + + run_next_test(); +} + +/** + * Initializing a RESTRequest with an invalid URI throws + * NS_ERROR_MALFORMED_URI. + */ +add_test(function test_invalid_uri() { + do_check_throws(function () { + new RESTRequest("an invalid URI"); + }, Cr.NS_ERROR_MALFORMED_URI); + run_next_test(); +}); + +/** + * Verify initial values for attributes. + */ +add_test(function test_attributes() { + let uri = "http://foo.com/bar/baz"; + let request = new RESTRequest(uri); + + Assert.ok(request.uri instanceof Ci.nsIURI); + Assert.equal(request.uri.spec, uri); + Assert.equal(request.response, null); + Assert.equal(request.status, request.NOT_SENT); + let expectedLoadFlags = + Ci.nsIRequest.LOAD_BYPASS_CACHE | + Ci.nsIRequest.INHIBIT_CACHING | + Ci.nsIRequest.LOAD_ANONYMOUS; + Assert.equal(request.loadFlags, expectedLoadFlags); + + run_next_test(); +}); + +/** + * Verify that a proxy auth redirect doesn't break us. This has to be the first + * request made in the file! 
+ */ +add_task(async function test_proxy_auth_redirect() { + let pacFetched = false; + function pacHandler(metadata, response) { + pacFetched = true; + let body = 'function FindProxyForURL(url, host) { return "DIRECT"; }'; + response.setStatusLine(metadata.httpVersion, 200, "OK"); + response.setHeader( + "Content-Type", + "application/x-ns-proxy-autoconfig", + false + ); + response.bodyOutputStream.write(body, body.length); + } + + let fetched = false; + function original(metadata, response) { + fetched = true; + let body = "TADA!"; + response.setStatusLine(metadata.httpVersion, 200, "OK"); + response.bodyOutputStream.write(body, body.length); + } + + let server = httpd_setup({ + "/original": original, + "/pac3": pacHandler, + }); + PACSystemSettings.PACURI = server.baseURI + "/pac3"; + installFakePAC(); + + let req = new RESTRequest(server.baseURI + "/original"); + await req.get(); + + Assert.ok(pacFetched); + Assert.ok(fetched); + + Assert.ok(req.response.success); + Assert.equal("TADA!", req.response.body); + uninstallFakePAC(); + await promiseStopServer(server); +}); + +/** + * Ensure that failures that cause asyncOpen to throw + * result in callbacks being invoked. + * Bug 826086. + */ +add_task(async function test_forbidden_port() { + let request = new RESTRequest("http://localhost:6000/"); + + await Assert.rejects( + request.get(), + error => error.result == Cr.NS_ERROR_PORT_ACCESS_NOT_ALLOWED + ); +}); + +/** + * Demonstrate API short-hand: create a request and dispatch it immediately. 
+ */ +add_task(async function test_simple_get() { + let handler = httpd_handler(200, "OK", "Huzzah!"); + let server = httpd_setup({ "/resource": handler }); + let request = new RESTRequest(server.baseURI + "/resource"); + let promiseResponse = request.get(); + + Assert.equal(request.status, request.SENT); + Assert.equal(request.method, "GET"); + + let response = await promiseResponse; + Assert.equal(response, request.response); + + Assert.equal(request.status, request.COMPLETED); + Assert.ok(response.success); + Assert.equal(response.status, 200); + Assert.equal(response.body, "Huzzah!"); + await promiseStopServer(server); +}); + +/** + * Test HTTP GET with all bells and whistles. + */ +add_task(async function test_get() { + let handler = httpd_handler(200, "OK", "Huzzah!"); + let server = httpd_setup({ "/resource": handler }); + + let request = new RESTRequest(server.baseURI + "/resource"); + Assert.equal(request.status, request.NOT_SENT); + + let promiseResponse = request.get(); + + Assert.equal(request.status, request.SENT); + Assert.equal(request.method, "GET"); + + Assert.ok(!!(request.channel.loadFlags & Ci.nsIRequest.LOAD_BYPASS_CACHE)); + Assert.ok(!!(request.channel.loadFlags & Ci.nsIRequest.INHIBIT_CACHING)); + + let response = await promiseResponse; + + Assert.equal(response, request.response); + Assert.equal(request.status, request.COMPLETED); + Assert.ok(request.response.success); + Assert.equal(request.response.status, 200); + Assert.equal(request.response.body, "Huzzah!"); + Assert.equal(handler.request.method, "GET"); + + await Assert.rejects(request.get(), /Request has already been sent/); + + await promiseStopServer(server); +}); + +/** + * Test HTTP GET with UTF-8 content, and custom Content-Type. 
+ */
+add_task(async function test_get_utf8() {
+  let response = "Hello World or Καλημέρα κόσμε or こんにちは 世界 😺";
+
+  let contentType = "text/plain";
+  let charset = true;
+  let charsetSuffix = "; charset=UTF-8";
+
+  let server = httpd_setup({
+    "/resource": function (req, res) {
+      res.setStatusLine(req.httpVersion, 200, "OK");
+      res.setHeader(
+        "Content-Type",
+        contentType + (charset ? charsetSuffix : "")
+      );
+
+      let converter = Cc[
+        "@mozilla.org/intl/converter-output-stream;1"
+      ].createInstance(Ci.nsIConverterOutputStream);
+      converter.init(res.bodyOutputStream, "UTF-8");
+      converter.writeString(response);
+      converter.close();
+    },
+  });
+
+  // Check if charset in Content-Type is properly interpreted.
+  let request1 = new RESTRequest(server.baseURI + "/resource");
+  await request1.get();
+
+  Assert.equal(request1.response.status, 200);
+  Assert.equal(request1.response.body, response);
+  Assert.equal(
+    request1.response.headers["content-type"],
+    contentType + charsetSuffix
+  );
+
+  // Check that we default to UTF-8 if Content-Type doesn't have a charset
+  charset = false;
+  let request2 = new RESTRequest(server.baseURI + "/resource");
+  await request2.get();
+  Assert.equal(request2.response.status, 200);
+  Assert.equal(request2.response.body, response);
+  Assert.equal(request2.response.headers["content-type"], contentType);
+  Assert.equal(request2.response.charset, "utf-8");
+
+  let request3 = new RESTRequest(server.baseURI + "/resource");
+
+  // With the test server we tend to get onDataAvailable in chunks of 8192 (in
+  // real network requests there doesn't appear to be any pattern to the size of
+  // the data `onDataAvailable` is called with), the smiling cat emoji encodes as
+  // 4 bytes, and so when utf8 encoded, the `"a" + "😺".repeat(2048)` will not be
+  // aligned onto a codepoint. 
+ // + // Since 8192 isn't guaranteed and could easily change, the following string is + // a) very long, and b) misaligned on roughly 3/4 of the bytes, as a safety + // measure. + response = ("a" + "😺".repeat(2048)).repeat(10); + + await request3.get(); + + Assert.equal(request3.response.status, 200); + + // Make sure it came through ok, despite the misalignment. + Assert.equal(request3.response.body, response); + + await promiseStopServer(server); +}); + +/** + * Test HTTP POST data is encoded as UTF-8 by default. + */ +add_task(async function test_post_utf8() { + // We setup a handler that responds with exactly what it received. + // Given we've already tested above that responses are correctly utf-8 + // decoded we can surmise that the correct response coming back means the + // input must also have been encoded. + let server = httpd_setup({ + "/echo": function (req, res) { + res.setStatusLine(req.httpVersion, 200, "OK"); + res.setHeader("Content-Type", req.getHeader("content-type")); + // Get the body as bytes and write them back without touching them + let sis = Cc["@mozilla.org/scriptableinputstream;1"].createInstance( + Ci.nsIScriptableInputStream + ); + sis.init(req.bodyInputStream); + let body = sis.read(sis.available()); + sis.close(); + res.write(body); + }, + }); + + let data = { + copyright: "©", + // See the comment in test_get_utf8 about this string. + long: ("a" + "😺".repeat(2048)).repeat(10), + }; + let request1 = new RESTRequest(server.baseURI + "/echo"); + await request1.post(data); + + Assert.equal(request1.response.status, 200); + deepEqual(JSON.parse(request1.response.body), data); + Assert.equal( + request1.response.headers["content-type"], + "application/json; charset=utf-8" + ); + + await promiseStopServer(server); +}); + +/** + * Test more variations of charset handling. 
+ */ +add_task(async function test_charsets() { + let response = "Hello World, I can't speak Russian"; + + let contentType = "text/plain"; + let charset = true; + let charsetSuffix = "; charset=us-ascii"; + + let server = httpd_setup({ + "/resource": function (req, res) { + res.setStatusLine(req.httpVersion, 200, "OK"); + res.setHeader( + "Content-Type", + contentType + (charset ? charsetSuffix : "") + ); + + let converter = Cc[ + "@mozilla.org/intl/converter-output-stream;1" + ].createInstance(Ci.nsIConverterOutputStream); + converter.init(res.bodyOutputStream, "us-ascii"); + converter.writeString(response); + converter.close(); + }, + }); + + // Check that provided charset overrides hint. + let request1 = new RESTRequest(server.baseURI + "/resource"); + request1.charset = "not-a-charset"; + await request1.get(); + Assert.equal(request1.response.status, 200); + Assert.equal(request1.response.body, response); + Assert.equal( + request1.response.headers["content-type"], + contentType + charsetSuffix + ); + Assert.equal(request1.response.charset, "us-ascii"); + + // Check that hint is used if Content-Type doesn't have a charset. + charset = false; + let request2 = new RESTRequest(server.baseURI + "/resource"); + request2.charset = "us-ascii"; + await request2.get(); + + Assert.equal(request2.response.status, 200); + Assert.equal(request2.response.body, response); + Assert.equal(request2.response.headers["content-type"], contentType); + Assert.equal(request2.response.charset, "us-ascii"); + + await promiseStopServer(server); +}); + +/** + * Used for testing PATCH/PUT/POST methods. 
+ */ +async function check_posting_data(method) { + let funcName = method.toLowerCase(); + let handler = httpd_handler(200, "OK", "Got it!"); + let server = httpd_setup({ "/resource": handler }); + + let request = new RESTRequest(server.baseURI + "/resource"); + Assert.equal(request.status, request.NOT_SENT); + let responsePromise = request[funcName]("Hullo?"); + Assert.equal(request.status, request.SENT); + Assert.equal(request.method, method); + + let response = await responsePromise; + + Assert.equal(response, request.response); + + Assert.equal(request.status, request.COMPLETED); + Assert.ok(request.response.success); + Assert.equal(request.response.status, 200); + Assert.equal(request.response.body, "Got it!"); + + Assert.equal(handler.request.method, method); + Assert.equal(handler.request.body, "Hullo?"); + Assert.equal(handler.request.getHeader("Content-Type"), "text/plain"); + + await Assert.rejects( + request[funcName]("Hai!"), + /Request has already been sent/ + ); + + await promiseStopServer(server); +} + +/** + * Test HTTP PATCH with a simple string argument and default Content-Type. + */ +add_task(async function test_patch() { + await check_posting_data("PATCH"); +}); + +/** + * Test HTTP PUT with a simple string argument and default Content-Type. + */ +add_task(async function test_put() { + await check_posting_data("PUT"); +}); + +/** + * Test HTTP POST with a simple string argument and default Content-Type. + */ +add_task(async function test_post() { + await check_posting_data("POST"); +}); + +/** + * Test HTTP DELETE. 
+ */ +add_task(async function test_delete() { + let handler = httpd_handler(200, "OK", "Got it!"); + let server = httpd_setup({ "/resource": handler }); + + let request = new RESTRequest(server.baseURI + "/resource"); + Assert.equal(request.status, request.NOT_SENT); + let responsePromise = request.delete(); + Assert.equal(request.status, request.SENT); + Assert.equal(request.method, "DELETE"); + + let response = await responsePromise; + Assert.equal(response, request.response); + + Assert.equal(request.status, request.COMPLETED); + Assert.ok(request.response.success); + Assert.equal(request.response.status, 200); + Assert.equal(request.response.body, "Got it!"); + Assert.equal(handler.request.method, "DELETE"); + + await Assert.rejects(request.delete(), /Request has already been sent/); + + await promiseStopServer(server); +}); + +/** + * Test an HTTP response with a non-200 status code. + */ +add_task(async function test_get_404() { + let handler = httpd_handler(404, "Not Found", "Cannae find it!"); + let server = httpd_setup({ "/resource": handler }); + + let request = new RESTRequest(server.baseURI + "/resource"); + await request.get(); + + Assert.equal(request.status, request.COMPLETED); + Assert.ok(!request.response.success); + Assert.equal(request.response.status, 404); + Assert.equal(request.response.body, "Cannae find it!"); + + await promiseStopServer(server); +}); + +/** + * The 'data' argument to PUT, if not a string already, is automatically + * stringified as JSON. 
+ */ +add_task(async function test_put_json() { + let handler = httpd_handler(200, "OK"); + let server = httpd_setup({ "/resource": handler }); + + let sample_data = { + some: "sample_data", + injson: "format", + number: 42, + }; + let request = new RESTRequest(server.baseURI + "/resource"); + await request.put(sample_data); + + Assert.equal(request.status, request.COMPLETED); + Assert.ok(request.response.success); + Assert.equal(request.response.status, 200); + Assert.equal(request.response.body, ""); + + Assert.equal(handler.request.method, "PUT"); + Assert.equal(handler.request.body, JSON.stringify(sample_data)); + Assert.equal( + handler.request.getHeader("Content-Type"), + "application/json; charset=utf-8" + ); + + await promiseStopServer(server); +}); + +/** + * The 'data' argument to POST, if not a string already, is automatically + * stringified as JSON. + */ +add_task(async function test_post_json() { + let handler = httpd_handler(200, "OK"); + let server = httpd_setup({ "/resource": handler }); + + let sample_data = { + some: "sample_data", + injson: "format", + number: 42, + }; + let request = new RESTRequest(server.baseURI + "/resource"); + await request.post(sample_data); + + Assert.equal(request.status, request.COMPLETED); + Assert.ok(request.response.success); + Assert.equal(request.response.status, 200); + Assert.equal(request.response.body, ""); + + Assert.equal(handler.request.method, "POST"); + Assert.equal(handler.request.body, JSON.stringify(sample_data)); + Assert.equal( + handler.request.getHeader("Content-Type"), + "application/json; charset=utf-8" + ); + + await promiseStopServer(server); +}); + +/** + * The content-type will be text/plain without a charset if the 'data' argument + * to POST is already a string. 
+ */ +add_task(async function test_post_json() { + let handler = httpd_handler(200, "OK"); + let server = httpd_setup({ "/resource": handler }); + + let sample_data = "hello"; + let request = new RESTRequest(server.baseURI + "/resource"); + await request.post(sample_data); + Assert.equal(request.status, request.COMPLETED); + Assert.ok(request.response.success); + Assert.equal(request.response.status, 200); + Assert.equal(request.response.body, ""); + + Assert.equal(handler.request.method, "POST"); + Assert.equal(handler.request.body, sample_data); + Assert.equal(handler.request.getHeader("Content-Type"), "text/plain"); + + await promiseStopServer(server); +}); + +/** + * HTTP PUT with a custom Content-Type header. + */ +add_task(async function test_put_override_content_type() { + let handler = httpd_handler(200, "OK"); + let server = httpd_setup({ "/resource": handler }); + + let request = new RESTRequest(server.baseURI + "/resource"); + request.setHeader("Content-Type", "application/lolcat"); + await request.put("O HAI!!1!"); + + Assert.equal(request.status, request.COMPLETED); + Assert.ok(request.response.success); + Assert.equal(request.response.status, 200); + Assert.equal(request.response.body, ""); + + Assert.equal(handler.request.method, "PUT"); + Assert.equal(handler.request.body, "O HAI!!1!"); + Assert.equal(handler.request.getHeader("Content-Type"), "application/lolcat"); + + await promiseStopServer(server); +}); + +/** + * HTTP POST with a custom Content-Type header. 
+ */
+add_task(async function test_post_override_content_type() {
+  let handler = httpd_handler(200, "OK");
+  let server = httpd_setup({ "/resource": handler });
+
+  let request = new RESTRequest(server.baseURI + "/resource");
+  request.setHeader("Content-Type", "application/lolcat");
+  await request.post("O HAI!!1!");
+
+  Assert.equal(request.status, request.COMPLETED);
+  Assert.ok(request.response.success);
+  Assert.equal(request.response.status, 200);
+  Assert.equal(request.response.body, "");
+
+  Assert.equal(handler.request.method, "POST");
+  Assert.equal(handler.request.body, "O HAI!!1!");
+  Assert.equal(handler.request.getHeader("Content-Type"), "application/lolcat");
+
+  await promiseStopServer(server);
+});
+
+/**
+ * No special headers are sent by default on a GET request.
+ */
+add_task(async function test_get_no_headers() {
+  let handler = httpd_handler(200, "OK");
+  let server = httpd_setup({ "/resource": handler });
+
+  let ignore_headers = [
+    "host",
+    "user-agent",
+    "accept",
+    "accept-language",
+    "accept-encoding",
+    "accept-charset",
+    "keep-alive",
+    "connection",
+    "pragma",
+    "cache-control",
+    "content-length",
+    "sec-fetch-dest",
+    "sec-fetch-mode",
+    "sec-fetch-site",
+    "sec-fetch-user",
+  ];
+  let request = new RESTRequest(server.baseURI + "/resource");
+  await request.get();
+
+  Assert.equal(request.response.status, 200);
+  Assert.equal(request.response.body, "");
+
+  let server_headers = handler.request.headers;
+  while (server_headers.hasMoreElements()) {
+    let header = server_headers.getNext().toString();
+    if (!ignore_headers.includes(header)) {
+      do_throw("Got unexpected header!");
+    }
+  }
+
+  await promiseStopServer(server);
+});
+
+/**
+ * Client includes default Accept header in API requests
+ */
+add_task(async function test_default_accept_headers() {
+  let handler = httpd_handler(200, "OK");
+  let server = httpd_setup({ "/resource": handler });
+
+  let request = new RESTRequest(server.baseURI + "/resource");
+  await request.get();
+
+  Assert.equal(request.response.status, 200);
+  Assert.equal(request.response.body, "");
+
+  let accept_header = handler.request.getHeader("accept");
+
+  Assert.ok(!accept_header.includes("text/html"));
+  Assert.ok(!accept_header.includes("application/xhtml+xml"));
+  // Fixed typo: was "applcation/xml", which made this assertion vacuous
+  // (the misspelled string can never appear in a real Accept header).
+  Assert.ok(!accept_header.includes("application/xml"));
+
+  Assert.ok(
+    accept_header.includes("application/json") ||
+      accept_header.includes("application/newlines")
+  );
+
+  await promiseStopServer(server);
+});
+
+/**
+ * Test changing the URI after having created the request.
+ */
+add_task(async function test_changing_uri() {
+  let handler = httpd_handler(200, "OK");
+  let server = httpd_setup({ "/resource": handler });
+
+  let request = new RESTRequest("http://localhost:1234/the-wrong-resource");
+  request.uri = CommonUtils.makeURI(server.baseURI + "/resource");
+  let response = await request.get();
+  Assert.equal(response.status, 200);
+  await promiseStopServer(server);
+});
+
+/**
+ * Test setting HTTP request headers.
+ */
+add_task(async function test_request_setHeader() {
+  let handler = httpd_handler(200, "OK");
+  let server = httpd_setup({ "/resource": handler });
+
+  let request = new RESTRequest(server.baseURI + "/resource");
+
+  request.setHeader("X-What-Is-Weave", "awesome");
+  request.setHeader("X-WHAT-is-Weave", "more awesomer");
+  request.setHeader("Another-Header", "Hello World");
+  await request.get();
+
+  Assert.equal(request.response.status, 200);
+  Assert.equal(request.response.body, "");
+
+  Assert.equal(handler.request.getHeader("X-What-Is-Weave"), "more awesomer");
+  Assert.equal(handler.request.getHeader("another-header"), "Hello World");
+
+  await promiseStopServer(server);
+});
+
+/**
+ * Test receiving HTTP response headers.
+ */ +add_task(async function test_response_headers() { + function handler(request, response) { + response.setHeader("X-What-Is-Weave", "awesome"); + response.setHeader("Another-Header", "Hello World"); + response.setStatusLine(request.httpVersion, 200, "OK"); + } + let server = httpd_setup({ "/resource": handler }); + let request = new RESTRequest(server.baseURI + "/resource"); + await request.get(); + + Assert.equal(request.response.status, 200); + Assert.equal(request.response.body, ""); + + Assert.equal(request.response.headers["x-what-is-weave"], "awesome"); + Assert.equal(request.response.headers["another-header"], "Hello World"); + + await promiseStopServer(server); +}); + +/** + * The onComplete() handler gets called in case of any network errors + * (e.g. NS_ERROR_CONNECTION_REFUSED). + */ +add_task(async function test_connection_refused() { + let request = new RESTRequest("http://localhost:1234/resource"); + + // Fail the test if we resolve, return the error if we reject + await Assert.rejects( + request.get(), + error => + error.result == Cr.NS_ERROR_CONNECTION_REFUSED && + error.message == "NS_ERROR_CONNECTION_REFUSED" + ); + + Assert.equal(request.status, request.COMPLETED); +}); + +/** + * Abort a request that just sent off. + */ +add_task(async function test_abort() { + function handler() { + do_throw("Shouldn't have gotten here!"); + } + let server = httpd_setup({ "/resource": handler }); + + let request = new RESTRequest(server.baseURI + "/resource"); + + // Aborting a request that hasn't been sent yet is pointless and will throw. + do_check_throws(function () { + request.abort(); + }); + + let responsePromise = request.get(); + request.abort(); + + // Aborting an already aborted request is pointless and will throw. 
+ do_check_throws(function () { + request.abort(); + }); + + Assert.equal(request.status, request.ABORTED); + + await Assert.rejects(responsePromise, /NS_BINDING_ABORTED/); + + await promiseStopServer(server); +}); + +/** + * A non-zero 'timeout' property specifies the amount of seconds to wait after + * channel activity until the request is automatically canceled. + */ +add_task(async function test_timeout() { + let server = new HttpServer(); + let server_connection; + server._handler.handleResponse = function (connection) { + // This is a handler that doesn't do anything, just keeps the connection + // open, thereby mimicking a timing out connection. We keep a reference to + // the open connection for later so it can be properly disposed of. That's + // why you really only want to make one HTTP request to this server ever. + server_connection = connection; + }; + server.start(); + let identity = server.identity; + let uri = + identity.primaryScheme + + "://" + + identity.primaryHost + + ":" + + identity.primaryPort; + + let request = new RESTRequest(uri + "/resource"); + request.timeout = 0.1; // 100 milliseconds + + await Assert.rejects( + request.get(), + error => error.result == Cr.NS_ERROR_NET_TIMEOUT + ); + + Assert.equal(request.status, request.ABORTED); + + // server_connection is undefined on the Android emulator for reasons + // unknown. Yet, we still get here. If this test is refactored, we should + // investigate the reason why the above callback is behaving differently. 
+ if (server_connection) { + _("Closing connection."); + server_connection.close(); + } + await promiseStopServer(server); +}); + +add_task(async function test_new_channel() { + _("Ensure a redirect to a new channel is handled properly."); + + function checkUA(metadata) { + let ua = metadata.getHeader("User-Agent"); + _("User-Agent is " + ua); + Assert.equal("foo bar", ua); + } + + let redirectRequested = false; + let redirectURL; + function redirectHandler(metadata, response) { + checkUA(metadata); + redirectRequested = true; + + let body = "Redirecting"; + response.setStatusLine(metadata.httpVersion, 307, "TEMPORARY REDIRECT"); + response.setHeader("Location", redirectURL); + response.bodyOutputStream.write(body, body.length); + } + + let resourceRequested = false; + function resourceHandler(metadata, response) { + checkUA(metadata); + resourceRequested = true; + + let body = "Test"; + response.setHeader("Content-Type", "text/plain"); + response.bodyOutputStream.write(body, body.length); + } + + let server1 = httpd_setup({ "/redirect": redirectHandler }); + let server2 = httpd_setup({ "/resource": resourceHandler }); + redirectURL = server2.baseURI + "/resource"; + + let request = new RESTRequest(server1.baseURI + "/redirect"); + request.setHeader("User-Agent", "foo bar"); + + // Swizzle in our own fakery, because this redirect is neither + // internal nor URI-preserving. RESTRequest's policy is to only + // copy headers under certain circumstances. + let protoMethod = request.shouldCopyOnRedirect; + request.shouldCopyOnRedirect = function wrapped(o, n, f) { + // Check the default policy. 
+ Assert.ok(!protoMethod.call(this, o, n, f)); + return true; + }; + + let response = await request.get(); + + Assert.equal(200, response.status); + Assert.equal("Test", response.body); + Assert.ok(redirectRequested); + Assert.ok(resourceRequested); + + await promiseStopServer(server1); + await promiseStopServer(server2); +}); + +add_task(async function test_not_sending_cookie() { + function handler(metadata, response) { + let body = "COOKIE!"; + response.setStatusLine(metadata.httpVersion, 200, "OK"); + response.bodyOutputStream.write(body, body.length); + Assert.ok(!metadata.hasHeader("Cookie")); + } + let server = httpd_setup({ "/test": handler }); + + let uri = CommonUtils.makeURI(server.baseURI); + let channel = NetUtil.newChannel({ + uri, + loadUsingSystemPrincipal: true, + contentPolicyType: Ci.nsIContentPolicy.TYPE_DOCUMENT, + }); + Services.cookies.setCookieStringFromHttp(uri, "test=test; path=/;", channel); + + let res = new RESTRequest(server.baseURI + "/test"); + let response = await res.get(); + + Assert.ok(response.success); + Assert.equal("COOKIE!", response.body); + + await promiseStopServer(server); +}); diff --git a/services/common/tests/unit/test_storage_adapter.js b/services/common/tests/unit/test_storage_adapter.js new file mode 100644 index 0000000000..1dda35ad12 --- /dev/null +++ b/services/common/tests/unit/test_storage_adapter.js @@ -0,0 +1,307 @@ +/* Any copyright is dedicated to the Public Domain. 
+ http://creativecommons.org/publicdomain/zero/1.0/ */ + +const { Sqlite } = ChromeUtils.importESModule( + "resource://gre/modules/Sqlite.sys.mjs" +); +const { FirefoxAdapter } = ChromeUtils.importESModule( + "resource://services-common/kinto-storage-adapter.sys.mjs" +); + +// set up what we need to make storage adapters +const kintoFilename = "kinto.sqlite"; + +function do_get_kinto_connection() { + return FirefoxAdapter.openConnection({ path: kintoFilename }); +} + +function do_get_kinto_adapter(sqliteHandle) { + return new FirefoxAdapter("test", { sqliteHandle }); +} + +function do_get_kinto_db() { + let profile = do_get_profile(); + let kintoDB = profile.clone(); + kintoDB.append(kintoFilename); + return kintoDB; +} + +function cleanup_kinto() { + add_test(function cleanup_kinto_files() { + let kintoDB = do_get_kinto_db(); + // clean up the db + kintoDB.remove(false); + run_next_test(); + }); +} + +function test_collection_operations() { + add_task(async function test_kinto_clear() { + let sqliteHandle = await do_get_kinto_connection(); + let adapter = do_get_kinto_adapter(sqliteHandle); + await adapter.clear(); + await sqliteHandle.close(); + }); + + // test creating new records... 
and getting them again + add_task(async function test_kinto_create_new_get_existing() { + let sqliteHandle = await do_get_kinto_connection(); + let adapter = do_get_kinto_adapter(sqliteHandle); + let record = { id: "test-id", foo: "bar" }; + await adapter.execute(transaction => transaction.create(record)); + let newRecord = await adapter.get("test-id"); + // ensure the record is the same as when it was added + deepEqual(record, newRecord); + await sqliteHandle.close(); + }); + + // test removing records + add_task(async function test_kinto_can_remove_some_records() { + let sqliteHandle = await do_get_kinto_connection(); + let adapter = do_get_kinto_adapter(sqliteHandle); + // create a second record + let record = { id: "test-id-2", foo: "baz" }; + await adapter.execute(transaction => transaction.create(record)); + let newRecord = await adapter.get("test-id-2"); + deepEqual(record, newRecord); + // delete the record + await adapter.execute(transaction => transaction.delete(record.id)); + newRecord = await adapter.get(record.id); + // ... and ensure it's no longer there + Assert.equal(newRecord, undefined); + // ensure the other record still exists + newRecord = await adapter.get("test-id"); + Assert.notEqual(newRecord, undefined); + await sqliteHandle.close(); + }); + + // test getting records that don't exist + add_task(async function test_kinto_get_non_existant() { + let sqliteHandle = await do_get_kinto_connection(); + let adapter = do_get_kinto_adapter(sqliteHandle); + // Kinto expects adapters to either: + let newRecord = await adapter.get("missing-test-id"); + // resolve with an undefined record + Assert.equal(newRecord, undefined); + await sqliteHandle.close(); + }); + + // test updating records... 
and getting them again + add_task(async function test_kinto_update_get_existing() { + let sqliteHandle = await do_get_kinto_connection(); + let adapter = do_get_kinto_adapter(sqliteHandle); + let originalRecord = { id: "test-id", foo: "bar" }; + let updatedRecord = { id: "test-id", foo: "baz" }; + await adapter.clear(); + await adapter.execute(transaction => transaction.create(originalRecord)); + await adapter.execute(transaction => transaction.update(updatedRecord)); + // ensure the record exists + let newRecord = await adapter.get("test-id"); + // ensure the record is the same as when it was added + deepEqual(updatedRecord, newRecord); + await sqliteHandle.close(); + }); + + // test listing records + add_task(async function test_kinto_list() { + let sqliteHandle = await do_get_kinto_connection(); + let adapter = do_get_kinto_adapter(sqliteHandle); + let originalRecord = { id: "test-id-1", foo: "bar" }; + let records = await adapter.list(); + Assert.equal(records.length, 1); + await adapter.execute(transaction => transaction.create(originalRecord)); + records = await adapter.list(); + Assert.equal(records.length, 2); + await sqliteHandle.close(); + }); + + // test aborting transaction + add_task(async function test_kinto_aborting_transaction() { + let sqliteHandle = await do_get_kinto_connection(); + let adapter = do_get_kinto_adapter(sqliteHandle); + await adapter.clear(); + let record = { id: 1, foo: "bar" }; + let error = null; + try { + await adapter.execute(transaction => { + transaction.create(record); + throw new Error("unexpected"); + }); + } catch (e) { + error = e; + } + Assert.notEqual(error, null); + let records = await adapter.list(); + Assert.equal(records.length, 0); + await sqliteHandle.close(); + }); + + // test save and get last modified + add_task(async function test_kinto_last_modified() { + const initialValue = 0; + const intendedValue = 12345678; + + let sqliteHandle = await do_get_kinto_connection(); + let adapter = 
do_get_kinto_adapter(sqliteHandle); + let lastModified = await adapter.getLastModified(); + Assert.equal(lastModified, initialValue); + let result = await adapter.saveLastModified(intendedValue); + Assert.equal(result, intendedValue); + lastModified = await adapter.getLastModified(); + Assert.equal(lastModified, intendedValue); + + // test saveLastModified parses values correctly + result = await adapter.saveLastModified(" " + intendedValue + " blah"); + // should resolve with the parsed int + Assert.equal(result, intendedValue); + // and should have saved correctly + lastModified = await adapter.getLastModified(); + Assert.equal(lastModified, intendedValue); + await sqliteHandle.close(); + }); + + // test loadDump(records) + add_task(async function test_kinto_import_records() { + let sqliteHandle = await do_get_kinto_connection(); + let adapter = do_get_kinto_adapter(sqliteHandle); + let record1 = { id: 1, foo: "bar" }; + let record2 = { id: 2, foo: "baz" }; + let impactedRecords = await adapter.loadDump([record1, record2]); + Assert.equal(impactedRecords.length, 2); + let newRecord1 = await adapter.get("1"); + // ensure the record is the same as when it was added + deepEqual(record1, newRecord1); + let newRecord2 = await adapter.get("2"); + // ensure the record is the same as when it was added + deepEqual(record2, newRecord2); + await sqliteHandle.close(); + }); + + add_task(async function test_kinto_import_records_should_override_existing() { + let sqliteHandle = await do_get_kinto_connection(); + let adapter = do_get_kinto_adapter(sqliteHandle); + await adapter.clear(); + let records = await adapter.list(); + Assert.equal(records.length, 0); + let impactedRecords = await adapter.loadDump([ + { id: 1, foo: "bar" }, + { id: 2, foo: "baz" }, + ]); + Assert.equal(impactedRecords.length, 2); + await adapter.loadDump([ + { id: 1, foo: "baz" }, + { id: 3, foo: "bab" }, + ]); + records = await adapter.list(); + Assert.equal(records.length, 3); + let newRecord1 = await 
adapter.get("1"); + deepEqual(newRecord1.foo, "baz"); + await sqliteHandle.close(); + }); + + add_task(async function test_import_updates_lastModified() { + let sqliteHandle = await do_get_kinto_connection(); + let adapter = do_get_kinto_adapter(sqliteHandle); + await adapter.loadDump([ + { id: 1, foo: "bar", last_modified: 1457896541 }, + { id: 2, foo: "baz", last_modified: 1458796542 }, + ]); + let lastModified = await adapter.getLastModified(); + Assert.equal(lastModified, 1458796542); + await sqliteHandle.close(); + }); + + add_task(async function test_import_preserves_older_lastModified() { + let sqliteHandle = await do_get_kinto_connection(); + let adapter = do_get_kinto_adapter(sqliteHandle); + await adapter.saveLastModified(1458796543); + + await adapter.loadDump([ + { id: 1, foo: "bar", last_modified: 1457896541 }, + { id: 2, foo: "baz", last_modified: 1458796542 }, + ]); + let lastModified = await adapter.getLastModified(); + Assert.equal(lastModified, 1458796543); + await sqliteHandle.close(); + }); + + add_task(async function test_save_metadata_preserves_lastModified() { + let sqliteHandle = await do_get_kinto_connection(); + + let adapter = do_get_kinto_adapter(sqliteHandle); + await adapter.saveLastModified(42); + + await adapter.saveMetadata({ id: "col" }); + + let lastModified = await adapter.getLastModified(); + Assert.equal(lastModified, 42); + await sqliteHandle.close(); + }); +} + +// test kinto db setup and operations in various scenarios +// test from scratch - no current existing database +add_test(function test_db_creation() { + add_test(function test_create_from_scratch() { + // ensure the file does not exist in the profile + let kintoDB = do_get_kinto_db(); + Assert.ok(!kintoDB.exists()); + run_next_test(); + }); + + test_collection_operations(); + + cleanup_kinto(); + run_next_test(); +}); + +// this is the closest we can get to a schema version upgrade at v1 - test an +// existing database +add_test(function test_creation_from_empty_db() 
{ + add_test(function test_create_from_empty_db() { + // place an empty kinto db file in the profile + let profile = do_get_profile(); + + let emptyDB = do_get_file("test_storage_adapter/empty.sqlite"); + emptyDB.copyTo(profile, kintoFilename); + + run_next_test(); + }); + + test_collection_operations(); + + cleanup_kinto(); + run_next_test(); +}); + +// test schema version upgrade at v2 +add_test(function test_migration_from_v1_to_v2() { + add_test(function test_migrate_from_v1_to_v2() { + // place an empty kinto db file in the profile + let profile = do_get_profile(); + + let v1DB = do_get_file("test_storage_adapter/v1.sqlite"); + v1DB.copyTo(profile, kintoFilename); + + run_next_test(); + }); + + add_test(async function schema_is_update_from_1_to_2() { + // The `v1.sqlite` has schema version 1. + let sqliteHandle = await Sqlite.openConnection({ path: kintoFilename }); + Assert.equal(await sqliteHandle.getSchemaVersion(), 1); + await sqliteHandle.close(); + + // The `.openConnection()` migrates it to version 2. 
+ sqliteHandle = await FirefoxAdapter.openConnection({ path: kintoFilename }); + Assert.equal(await sqliteHandle.getSchemaVersion(), 2); + await sqliteHandle.close(); + + run_next_test(); + }); + + test_collection_operations(); + + cleanup_kinto(); + run_next_test(); +}); diff --git a/services/common/tests/unit/test_storage_adapter/empty.sqlite b/services/common/tests/unit/test_storage_adapter/empty.sqlite Binary files differnew file mode 100644 index 0000000000..7f295b4146 --- /dev/null +++ b/services/common/tests/unit/test_storage_adapter/empty.sqlite diff --git a/services/common/tests/unit/test_storage_adapter/v1.sqlite b/services/common/tests/unit/test_storage_adapter/v1.sqlite Binary files differnew file mode 100644 index 0000000000..8482b8b31d --- /dev/null +++ b/services/common/tests/unit/test_storage_adapter/v1.sqlite diff --git a/services/common/tests/unit/test_storage_adapter_shutdown.js b/services/common/tests/unit/test_storage_adapter_shutdown.js new file mode 100644 index 0000000000..dce26ce842 --- /dev/null +++ b/services/common/tests/unit/test_storage_adapter_shutdown.js @@ -0,0 +1,28 @@ +/* Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ */ + +const { AsyncShutdown } = ChromeUtils.importESModule( + "resource://gre/modules/AsyncShutdown.sys.mjs" +); + +const { FirefoxAdapter } = ChromeUtils.importESModule( + "resource://services-common/kinto-storage-adapter.sys.mjs" +); + +add_task(async function test_sqlite_shutdown() { + const sqliteHandle = await FirefoxAdapter.openConnection({ + path: "kinto.sqlite", + }); + + // Shutdown Sqlite.sys.mjs synchronously. 
+ Services.prefs.setBoolPref("toolkit.asyncshutdown.testing", true); + AsyncShutdown.profileBeforeChange._trigger(); + Services.prefs.clearUserPref("toolkit.asyncshutdown.testing"); + + try { + sqliteHandle.execute("SELECT 1;"); + equal("Should not succeed, connection should be closed.", false); + } catch (e) { + equal(e.message, "Connection is not open."); + } +}); diff --git a/services/common/tests/unit/test_tokenauthenticatedrequest.js b/services/common/tests/unit/test_tokenauthenticatedrequest.js new file mode 100644 index 0000000000..70735894e4 --- /dev/null +++ b/services/common/tests/unit/test_tokenauthenticatedrequest.js @@ -0,0 +1,50 @@ +/* Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/publicdomain/zero/1.0/ */ + +const { CryptoUtils } = ChromeUtils.importESModule( + "resource://services-crypto/utils.sys.mjs" +); +const { TokenAuthenticatedRESTRequest } = ChromeUtils.importESModule( + "resource://services-common/rest.sys.mjs" +); + +function run_test() { + initTestLogging("Trace"); + run_next_test(); +} + +add_task(async function test_authenticated_request() { + _("Ensure that sending a MAC authenticated GET request works as expected."); + + let message = "Great Success!"; + + let id = "eyJleHBpcmVzIjogMTM2NTAxMDg5OC4x"; + let key = "qTZf4ZFpAMpMoeSsX3zVRjiqmNs="; + let method = "GET"; + + let nonce = btoa(CryptoUtils.generateRandomBytesLegacy(16)); + let ts = Math.floor(Date.now() / 1000); + let extra = { ts, nonce }; + + let auth; + + let server = httpd_setup({ + "/foo": function (request, response) { + Assert.ok(request.hasHeader("Authorization")); + Assert.equal(auth, request.getHeader("Authorization")); + + response.setStatusLine(request.httpVersion, 200, "OK"); + response.bodyOutputStream.write(message, message.length); + }, + }); + let uri = CommonUtils.makeURI(server.baseURI + "/foo"); + let sig = await CryptoUtils.computeHTTPMACSHA1(id, key, method, uri, extra); + auth = sig.getHeader(); + + let req = new 
TokenAuthenticatedRESTRequest(uri, { id, key }, extra); + await req.get(); + + Assert.equal(message, req.response.body); + + await promiseStopServer(server); +}); diff --git a/services/common/tests/unit/test_tokenserverclient.js b/services/common/tests/unit/test_tokenserverclient.js new file mode 100644 index 0000000000..c1c47f6e1a --- /dev/null +++ b/services/common/tests/unit/test_tokenserverclient.js @@ -0,0 +1,382 @@ +/* Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/publicdomain/zero/1.0/ */ + +const { + TokenServerClient, + TokenServerClientError, + TokenServerClientServerError, +} = ChromeUtils.importESModule( + "resource://services-common/tokenserverclient.sys.mjs" +); + +initTestLogging("Trace"); + +add_task(async function test_working_token_exchange() { + _("Ensure that working OAuth token exchange works as expected."); + + let service = "http://example.com/foo"; + let duration = 300; + + let server = httpd_setup({ + "/1.0/foo/1.0": function (request, response) { + Assert.ok(request.hasHeader("accept")); + Assert.equal("application/json", request.getHeader("accept")); + + response.setStatusLine(request.httpVersion, 200, "OK"); + response.setHeader("Content-Type", "application/json"); + + let body = JSON.stringify({ + id: "id", + key: "key", + api_endpoint: service, + uid: "uid", + duration, + }); + response.bodyOutputStream.write(body, body.length); + }, + }); + + let client = new TokenServerClient(); + let url = server.baseURI + "/1.0/foo/1.0"; + let result = await client.getTokenUsingOAuth(url, "access_token"); + Assert.equal("object", typeof result); + do_check_attribute_count(result, 7); + Assert.equal(service, result.endpoint); + Assert.equal("id", result.id); + Assert.equal("key", result.key); + Assert.equal("uid", result.uid); + Assert.equal(duration, result.duration); + Assert.deepEqual(undefined, result.node_type); + await promiseStopServer(server); +}); + +add_task(async function 
test_working_token_exchange_with_nodetype() { + _("Ensure that a token response with a node type as expected."); + + let service = "http://example.com/foo"; + let duration = 300; + let nodeType = "the-node-type"; + + let server = httpd_setup({ + "/1.0/foo/1.0": function (request, response) { + Assert.ok(request.hasHeader("accept")); + Assert.equal("application/json", request.getHeader("accept")); + + response.setStatusLine(request.httpVersion, 200, "OK"); + response.setHeader("Content-Type", "application/json"); + + let body = JSON.stringify({ + id: "id", + key: "key", + api_endpoint: service, + uid: "uid", + duration, + node_type: nodeType, + }); + response.bodyOutputStream.write(body, body.length); + }, + }); + + let client = new TokenServerClient(); + let url = server.baseURI + "/1.0/foo/1.0"; + let result = await client.getTokenUsingOAuth(url, "access_token"); + Assert.equal("object", typeof result); + do_check_attribute_count(result, 7); + Assert.equal(service, result.endpoint); + Assert.equal("id", result.id); + Assert.equal("key", result.key); + Assert.equal("uid", result.uid); + Assert.equal(duration, result.duration); + Assert.equal(nodeType, result.node_type); + await promiseStopServer(server); +}); + +add_task(async function test_invalid_arguments() { + _("Ensure invalid arguments to APIs are rejected."); + + let args = [ + [null, "access_token"], + ["http://example.com/", null], + ]; + + for (let arg of args) { + let client = new TokenServerClient(); + await Assert.rejects(client.getTokenUsingOAuth(arg[0], arg[1]), ex => { + Assert.ok(ex instanceof TokenServerClientError); + return true; + }); + } +}); + +add_task(async function test_invalid_403_no_content_type() { + _("Ensure that a 403 without content-type is handled properly."); + + let server = httpd_setup({ + "/1.0/foo/1.0": function (request, response) { + response.setStatusLine(request.httpVersion, 403, "Forbidden"); + // No Content-Type header by design. 
+ + let body = JSON.stringify({ + errors: [{ description: "irrelevant", location: "body", name: "" }], + urls: { foo: "http://bar" }, + }); + response.bodyOutputStream.write(body, body.length); + }, + }); + + let client = new TokenServerClient(); + let url = server.baseURI + "/1.0/foo/1.0"; + + await Assert.rejects( + client.getTokenUsingOAuth(url, "access_token"), + error => { + Assert.ok(error instanceof TokenServerClientServerError); + Assert.equal(error.cause, "malformed-response"); + + Assert.equal(null, error.urls); + return true; + } + ); + + await promiseStopServer(server); +}); + +add_task(async function test_send_extra_headers() { + _("Ensures that the condition acceptance header is sent when asked."); + + let duration = 300; + let server = httpd_setup({ + "/1.0/foo/1.0": function (request, response) { + Assert.ok(request.hasHeader("x-foo")); + Assert.equal(request.getHeader("x-foo"), "42"); + + Assert.ok(request.hasHeader("x-bar")); + Assert.equal(request.getHeader("x-bar"), "17"); + + response.setStatusLine(request.httpVersion, 200, "OK"); + response.setHeader("Content-Type", "application/json"); + + let body = JSON.stringify({ + id: "id", + key: "key", + api_endpoint: "http://example.com/", + uid: "uid", + duration, + }); + response.bodyOutputStream.write(body, body.length); + }, + }); + + let client = new TokenServerClient(); + let url = server.baseURI + "/1.0/foo/1.0"; + + let extra = { + "X-Foo": 42, + "X-Bar": 17, + }; + + await client.getTokenUsingOAuth(url, "access_token", extra); + // Other tests validate other things. 
+ + await promiseStopServer(server); +}); + +add_task(async function test_error_404_empty() { + _("Ensure that 404 responses without proper response are handled properly."); + + let server = httpd_setup(); + + let client = new TokenServerClient(); + let url = server.baseURI + "/foo"; + + await Assert.rejects( + client.getTokenUsingOAuth(url, "access_token"), + error => { + Assert.ok(error instanceof TokenServerClientServerError); + Assert.equal(error.cause, "malformed-response"); + + Assert.notEqual(null, error.response); + return true; + } + ); + + await promiseStopServer(server); +}); + +add_task(async function test_error_404_proper_response() { + _("Ensure that a Cornice error report for 404 is handled properly."); + + let server = httpd_setup({ + "/1.0/foo/1.0": function (request, response) { + response.setStatusLine(request.httpVersion, 404, "Not Found"); + response.setHeader("Content-Type", "application/json; charset=utf-8"); + + let body = JSON.stringify({ + status: 404, + errors: [{ description: "No service", location: "body", name: "" }], + }); + + response.bodyOutputStream.write(body, body.length); + }, + }); + + let client = new TokenServerClient(); + let url = server.baseURI + "/1.0/foo/1.0"; + + await Assert.rejects( + client.getTokenUsingOAuth(url, "access_token"), + error => { + Assert.ok(error instanceof TokenServerClientServerError); + Assert.equal(error.cause, "unknown-service"); + return true; + } + ); + + await promiseStopServer(server); +}); + +add_task(async function test_bad_json() { + _("Ensure that malformed JSON is handled properly."); + + let server = httpd_setup({ + "/1.0/foo/1.0": function (request, response) { + response.setStatusLine(request.httpVersion, 200, "OK"); + response.setHeader("Content-Type", "application/json"); + + let body = '{"id": "id", baz}'; + response.bodyOutputStream.write(body, body.length); + }, + }); + + let client = new TokenServerClient(); + let url = server.baseURI + "/1.0/foo/1.0"; + + await Assert.rejects( + 
client.getTokenUsingOAuth(url, "access_token"), + error => { + Assert.notEqual(null, error); + Assert.equal("TokenServerClientServerError", error.name); + Assert.equal(error.cause, "malformed-response"); + Assert.notEqual(null, error.response); + return true; + } + ); + + await promiseStopServer(server); +}); + +add_task(async function test_400_response() { + _("Ensure HTTP 400 is converted to malformed-request."); + + let server = httpd_setup({ + "/1.0/foo/1.0": function (request, response) { + response.setStatusLine(request.httpVersion, 400, "Bad Request"); + response.setHeader("Content-Type", "application/json; charset=utf-8"); + + let body = "{}"; // Actual content may not be used. + response.bodyOutputStream.write(body, body.length); + }, + }); + + let client = new TokenServerClient(); + let url = server.baseURI + "/1.0/foo/1.0"; + + await Assert.rejects( + client.getTokenUsingOAuth(url, "access_token"), + error => { + Assert.notEqual(null, error); + Assert.equal("TokenServerClientServerError", error.name); + Assert.notEqual(null, error.response); + Assert.equal(error.cause, "malformed-request"); + return true; + } + ); + + await promiseStopServer(server); +}); + +add_task(async function test_401_with_error_cause() { + _("Ensure 401 cause is specified in body.status"); + + let server = httpd_setup({ + "/1.0/foo/1.0": function (request, response) { + response.setStatusLine(request.httpVersion, 401, "Unauthorized"); + response.setHeader("Content-Type", "application/json; charset=utf-8"); + + let body = JSON.stringify({ status: "no-soup-for-you" }); + response.bodyOutputStream.write(body, body.length); + }, + }); + + let client = new TokenServerClient(); + let url = server.baseURI + "/1.0/foo/1.0"; + + await Assert.rejects( + client.getTokenUsingOAuth(url, "access_token"), + error => { + Assert.notEqual(null, error); + Assert.equal("TokenServerClientServerError", error.name); + Assert.notEqual(null, error.response); + Assert.equal(error.cause, "no-soup-for-you"); 
+ return true; + } + ); + + await promiseStopServer(server); +}); + +add_task(async function test_unhandled_media_type() { + _("Ensure that unhandled media types throw an error."); + + let server = httpd_setup({ + "/1.0/foo/1.0": function (request, response) { + response.setStatusLine(request.httpVersion, 200, "OK"); + response.setHeader("Content-Type", "text/plain"); + + let body = "hello, world"; + response.bodyOutputStream.write(body, body.length); + }, + }); + + let url = server.baseURI + "/1.0/foo/1.0"; + let client = new TokenServerClient(); + + await Assert.rejects( + client.getTokenUsingOAuth(url, "access_token"), + error => { + Assert.notEqual(null, error); + Assert.equal("TokenServerClientServerError", error.name); + Assert.notEqual(null, error.response); + return true; + } + ); + + await promiseStopServer(server); +}); + +add_task(async function test_rich_media_types() { + _("Ensure that extra tokens in the media type aren't rejected."); + + let duration = 300; + let server = httpd_setup({ + "/foo": function (request, response) { + response.setStatusLine(request.httpVersion, 200, "OK"); + response.setHeader("Content-Type", "application/json; foo=bar; bar=foo"); + + let body = JSON.stringify({ + id: "id", + key: "key", + api_endpoint: "foo", + uid: "uid", + duration, + }); + response.bodyOutputStream.write(body, body.length); + }, + }); + + let url = server.baseURI + "/foo"; + let client = new TokenServerClient(); + + await client.getTokenUsingOAuth(url, "access_token"); + await promiseStopServer(server); +}); diff --git a/services/common/tests/unit/test_uptake_telemetry.js b/services/common/tests/unit/test_uptake_telemetry.js new file mode 100644 index 0000000000..24967b641f --- /dev/null +++ b/services/common/tests/unit/test_uptake_telemetry.js @@ -0,0 +1,121 @@ +const { TelemetryTestUtils } = ChromeUtils.importESModule( + "resource://testing-common/TelemetryTestUtils.sys.mjs" +); +const { UptakeTelemetry } = ChromeUtils.importESModule( + 
"resource://services-common/uptake-telemetry.sys.mjs" +); + +const COMPONENT = "remotesettings"; + +async function withFakeClientID(uuid, f) { + const { Policy } = ChromeUtils.importESModule( + "resource://services-common/uptake-telemetry.sys.mjs" + ); + let oldGetClientID = Policy.getClientID; + Policy._clientIDHash = null; + Policy.getClientID = () => Promise.resolve(uuid); + try { + return await f(); + } finally { + Policy.getClientID = oldGetClientID; + } +} + +add_task(async function test_unknown_status_is_not_reported() { + const source = "update-source"; + const startSnapshot = getUptakeTelemetrySnapshot(COMPONENT, source); + + try { + await UptakeTelemetry.report(COMPONENT, "unknown-status", { source }); + } catch (e) {} + + const endSnapshot = getUptakeTelemetrySnapshot(COMPONENT, source); + Assert.deepEqual(startSnapshot, endSnapshot); +}); + +add_task(async function test_age_is_converted_to_string_and_reported() { + const status = UptakeTelemetry.STATUS.SUCCESS; + const age = 42; + + await withFakeChannel("nightly", async () => { + await UptakeTelemetry.report(COMPONENT, status, { source: "s", age }); + }); + + TelemetryTestUtils.assertEvents([ + [ + "uptake.remotecontent.result", + "uptake", + COMPONENT, + status, + { source: "s", age: `${age}` }, + ], + ]); +}); + +add_task(async function test_each_status_can_be_caught_in_snapshot() { + const source = "some-source"; + const startSnapshot = getUptakeTelemetrySnapshot(COMPONENT, source); + + const expectedIncrements = {}; + await withFakeChannel("nightly", async () => { + for (const status of Object.values(UptakeTelemetry.STATUS)) { + expectedIncrements[status] = 1; + await UptakeTelemetry.report(COMPONENT, status, { source }); + } + }); + + const endSnapshot = getUptakeTelemetrySnapshot(COMPONENT, source); + checkUptakeTelemetry(startSnapshot, endSnapshot, expectedIncrements); +}); + +add_task(async function test_events_are_sent_when_hash_is_mod_0() { + const source = "some-source"; + const 
startSnapshot = getUptakeTelemetrySnapshot(COMPONENT, source); + const startSuccess = startSnapshot.success || 0; + const uuid = "d81bbfad-d741-41f5-a7e6-29f6bde4972a"; // hash % 100 = 0 + await withFakeClientID(uuid, async () => { + await withFakeChannel("release", async () => { + await UptakeTelemetry.report(COMPONENT, UptakeTelemetry.STATUS.SUCCESS, { + source, + }); + }); + }); + const endSnapshot = getUptakeTelemetrySnapshot(COMPONENT, source); + Assert.equal(endSnapshot.success, startSuccess + 1); +}); + +add_task( + async function test_events_are_not_sent_when_hash_is_greater_than_pref() { + const source = "some-source"; + const startSnapshot = getUptakeTelemetrySnapshot(COMPONENT, source); + const startSuccess = startSnapshot.success || 0; + const uuid = "d81bbfad-d741-41f5-a7e6-29f6bde49721"; // hash % 100 = 1 + await withFakeClientID(uuid, async () => { + await withFakeChannel("release", async () => { + await UptakeTelemetry.report( + COMPONENT, + UptakeTelemetry.STATUS.SUCCESS, + { source } + ); + }); + }); + const endSnapshot = getUptakeTelemetrySnapshot(COMPONENT, source); + Assert.equal(endSnapshot.success || 0, startSuccess); + } +); + +add_task(async function test_events_are_sent_when_nightly() { + const source = "some-source"; + const startSnapshot = getUptakeTelemetrySnapshot(COMPONENT, source); + const startSuccess = startSnapshot.success || 0; + const uuid = "d81bbfad-d741-41f5-a7e6-29f6bde49721"; // hash % 100 = 1 + await withFakeClientID(uuid, async () => { + await withFakeChannel("nightly", async () => { + await UptakeTelemetry.report(COMPONENT, UptakeTelemetry.STATUS.SUCCESS, { + source, + }); + }); + }); + const endSnapshot = getUptakeTelemetrySnapshot(COMPONENT, source); + Assert.equal(endSnapshot.success, startSuccess + 1); +}); diff --git a/services/common/tests/unit/test_utils_atob.js b/services/common/tests/unit/test_utils_atob.js new file mode 100644 index 0000000000..9e24b56f6f --- /dev/null +++ 
b/services/common/tests/unit/test_utils_atob.js @@ -0,0 +1,9 @@ +/* Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ */ + +function run_test() { + let data = ["Zm9vYmE=", "Zm9vYmE==", "Zm9vYmE==="]; + for (let d in data) { + Assert.equal(CommonUtils.safeAtoB(data[d]), "fooba"); + } +} diff --git a/services/common/tests/unit/test_utils_convert_string.js b/services/common/tests/unit/test_utils_convert_string.js new file mode 100644 index 0000000000..fcfd874994 --- /dev/null +++ b/services/common/tests/unit/test_utils_convert_string.js @@ -0,0 +1,146 @@ +/* Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ */ + +"use strict"; + +// A wise line of Greek verse, and the utf-8 byte encoding. +// N.b., Greek begins at utf-8 ce 91 +const TEST_STR = "πόλλ' οἶδ' ἀλώπηξ, ἀλλ' ἐχῖνος ἓν μέγα"; +const TEST_HEX = h( + "cf 80 cf 8c ce bb ce bb 27 20 ce bf e1 bc b6 ce" + + "b4 27 20 e1 bc 80 ce bb cf 8e cf 80 ce b7 ce be" + + "2c 20 e1 bc 80 ce bb ce bb 27 20 e1 bc 90 cf 87" + + "e1 bf 96 ce bd ce bf cf 82 20 e1 bc 93 ce bd 20" + + "ce bc ce ad ce b3 ce b1" +); +// Integer byte values for the above +const TEST_BYTES = [ + 207, 128, 207, 140, 206, 187, 206, 187, 39, 32, 206, 191, 225, 188, 182, 206, + 180, 39, 32, 225, 188, 128, 206, 187, 207, 142, 207, 128, 206, 183, 206, 190, + 44, 32, 225, 188, 128, 206, 187, 206, 187, 39, 32, 225, 188, 144, 207, 135, + 225, 191, 150, 206, 189, 206, 191, 207, 130, 32, 225, 188, 147, 206, 189, 32, + 206, 188, 206, 173, 206, 179, 206, 177, +]; + +add_test(function test_compress_string() { + const INPUT = "hello"; + + let result = CommonUtils.convertString(INPUT, "uncompressed", "deflate"); + Assert.equal(result.length, 13); + + let result2 = CommonUtils.convertString(INPUT, "uncompressed", "deflate"); + Assert.equal(result, result2); + + let result3 = CommonUtils.convertString(result, "deflate", "uncompressed"); + Assert.equal(result3, INPUT); + + 
run_next_test(); +}); + +add_test(function test_compress_utf8() { + const INPUT = + "Árvíztűrő tükörfúrógép いろはにほへとちりぬるを Pijamalı hasta, yağız şoföre çabucak güvendi."; + let inputUTF8 = CommonUtils.encodeUTF8(INPUT); + + let compressed = CommonUtils.convertString( + inputUTF8, + "uncompressed", + "deflate" + ); + let uncompressed = CommonUtils.convertString( + compressed, + "deflate", + "uncompressed" + ); + + Assert.equal(uncompressed, inputUTF8); + + let outputUTF8 = CommonUtils.decodeUTF8(uncompressed); + Assert.equal(outputUTF8, INPUT); + + run_next_test(); +}); + +add_test(function test_bad_argument() { + let failed = false; + try { + CommonUtils.convertString(null, "uncompressed", "deflate"); + } catch (ex) { + failed = true; + Assert.ok(ex.message.startsWith("Input string must be defined")); + } finally { + Assert.ok(failed); + } + + run_next_test(); +}); + +add_task(function test_stringAsHex() { + Assert.equal(TEST_HEX, CommonUtils.stringAsHex(TEST_STR)); +}); + +add_task(function test_hexAsString() { + Assert.equal(TEST_STR, CommonUtils.hexAsString(TEST_HEX)); +}); + +add_task(function test_hexToBytes() { + let bytes = CommonUtils.hexToBytes(TEST_HEX); + Assert.equal(TEST_BYTES.length, bytes.length); + // Ensure that the decimal values of each byte are correct + Assert.ok(arraysEqual(TEST_BYTES, CommonUtils.stringToByteArray(bytes))); +}); + +add_task(function test_bytesToHex() { + // Create a list of our character bytes from the reference int values + let bytes = CommonUtils.byteArrayToString(TEST_BYTES); + Assert.equal(TEST_HEX, CommonUtils.bytesAsHex(bytes)); +}); + +add_task(function test_stringToBytes() { + Assert.ok( + arraysEqual( + TEST_BYTES, + CommonUtils.stringToByteArray(CommonUtils.stringToBytes(TEST_STR)) + ) + ); +}); + +add_task(function test_stringRoundTrip() { + Assert.equal( + TEST_STR, + CommonUtils.hexAsString(CommonUtils.stringAsHex(TEST_STR)) + ); +}); + +add_task(function test_hexRoundTrip() { + Assert.equal( + TEST_HEX, + 
CommonUtils.stringAsHex(CommonUtils.hexAsString(TEST_HEX)) + ); +}); + +add_task(function test_byteArrayRoundTrip() { + Assert.ok( + arraysEqual( + TEST_BYTES, + CommonUtils.stringToByteArray(CommonUtils.byteArrayToString(TEST_BYTES)) + ) + ); +}); + +// turn formatted test vectors into normal hex strings +function h(hexStr) { + return hexStr.replace(/\s+/g, ""); +} + +function arraysEqual(a1, a2) { + if (a1.length !== a2.length) { + return false; + } + for (let i = 0; i < a1.length; i++) { + if (a1[i] !== a2[i]) { + return false; + } + } + return true; +} diff --git a/services/common/tests/unit/test_utils_dateprefs.js b/services/common/tests/unit/test_utils_dateprefs.js new file mode 100644 index 0000000000..35b0c26470 --- /dev/null +++ b/services/common/tests/unit/test_utils_dateprefs.js @@ -0,0 +1,77 @@ +/* Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/publicdomain/zero/1.0/ */ + +"use strict"; + +var prefs = Services.prefs.getBranch("servicescommon.tests."); + +function DummyLogger() { + this.messages = []; +} +DummyLogger.prototype.warn = function warn(message) { + this.messages.push(message); +}; + +add_test(function test_set_basic() { + let now = new Date(); + + CommonUtils.setDatePref(prefs, "test00", now); + let value = prefs.getStringPref("test00"); + Assert.equal(value, "" + now.getTime()); + + let now2 = CommonUtils.getDatePref(prefs, "test00"); + + Assert.equal(now.getTime(), now2.getTime()); + + run_next_test(); +}); + +add_test(function test_set_bounds_checking() { + let d = new Date(2342354); + + let failed = false; + try { + CommonUtils.setDatePref(prefs, "test01", d); + } catch (ex) { + Assert.ok(ex.message.startsWith("Trying to set")); + failed = true; + } + + Assert.ok(failed); + run_next_test(); +}); + +add_test(function test_get_bounds_checking() { + prefs.setStringPref("test_bounds_checking", "13241431"); + + let log = new DummyLogger(); + let d = CommonUtils.getDatePref(prefs, "test_bounds_checking", 0, 
log); + Assert.equal(d.getTime(), 0); + Assert.equal(log.messages.length, 1); + + run_next_test(); +}); + +add_test(function test_get_bad_default() { + let failed = false; + try { + CommonUtils.getDatePref(prefs, "get_bad_default", new Date()); + } catch (ex) { + Assert.ok(ex.message.startsWith("Default value is not a number")); + failed = true; + } + + Assert.ok(failed); + run_next_test(); +}); + +add_test(function test_get_invalid_number() { + prefs.setStringPref("get_invalid_number", "hello world"); + + let log = new DummyLogger(); + let d = CommonUtils.getDatePref(prefs, "get_invalid_number", 42, log); + Assert.equal(d.getTime(), 42); + Assert.equal(log.messages.length, 1); + + run_next_test(); +}); diff --git a/services/common/tests/unit/test_utils_encodeBase32.js b/services/common/tests/unit/test_utils_encodeBase32.js new file mode 100644 index 0000000000..3299ac8d05 --- /dev/null +++ b/services/common/tests/unit/test_utils_encodeBase32.js @@ -0,0 +1,55 @@ +/* Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/publicdomain/zero/1.0/ */ + +function run_test() { + // Testing byte array manipulation. 
+ Assert.equal( + "FOOBAR", + CommonUtils.byteArrayToString([70, 79, 79, 66, 65, 82]) + ); + Assert.equal("", CommonUtils.byteArrayToString([])); + + _("Testing encoding..."); + // Test vectors from RFC 4648 + Assert.equal(CommonUtils.encodeBase32(""), ""); + Assert.equal(CommonUtils.encodeBase32("f"), "MY======"); + Assert.equal(CommonUtils.encodeBase32("fo"), "MZXQ===="); + Assert.equal(CommonUtils.encodeBase32("foo"), "MZXW6==="); + Assert.equal(CommonUtils.encodeBase32("foob"), "MZXW6YQ="); + Assert.equal(CommonUtils.encodeBase32("fooba"), "MZXW6YTB"); + Assert.equal(CommonUtils.encodeBase32("foobar"), "MZXW6YTBOI======"); + + Assert.equal( + CommonUtils.encodeBase32("Bacon is a vegetable."), + "IJQWG33OEBUXGIDBEB3GKZ3FORQWE3DFFY======" + ); + + _("Checking assumptions..."); + for (let i = 0; i <= 255; ++i) { + Assert.equal(undefined | i, i); + } + + _("Testing decoding..."); + Assert.equal(CommonUtils.decodeBase32(""), ""); + Assert.equal(CommonUtils.decodeBase32("MY======"), "f"); + Assert.equal(CommonUtils.decodeBase32("MZXQ===="), "fo"); + Assert.equal(CommonUtils.decodeBase32("MZXW6YTB"), "fooba"); + Assert.equal(CommonUtils.decodeBase32("MZXW6YTBOI======"), "foobar"); + + // Same with incorrect or missing padding. + Assert.equal(CommonUtils.decodeBase32("MZXW6YTBOI=="), "foobar"); + Assert.equal(CommonUtils.decodeBase32("MZXW6YTBOI"), "foobar"); + + let encoded = CommonUtils.encodeBase32("Bacon is a vegetable."); + _("Encoded to " + JSON.stringify(encoded)); + Assert.equal(CommonUtils.decodeBase32(encoded), "Bacon is a vegetable."); + + // Test failure. 
+ let err; + try { + CommonUtils.decodeBase32("000"); + } catch (ex) { + err = ex; + } + Assert.equal(err.message, "Unknown character in base32: 0"); +} diff --git a/services/common/tests/unit/test_utils_encodeBase64URL.js b/services/common/tests/unit/test_utils_encodeBase64URL.js new file mode 100644 index 0000000000..84d98b06f1 --- /dev/null +++ b/services/common/tests/unit/test_utils_encodeBase64URL.js @@ -0,0 +1,21 @@ +/* Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ */ + +add_test(function test_simple() { + let expected = { + hello: "aGVsbG8=", + "<>?": "PD4_", + }; + + for (let [k, v] of Object.entries(expected)) { + Assert.equal(CommonUtils.encodeBase64URL(k), v); + } + + run_next_test(); +}); + +add_test(function test_no_padding() { + Assert.equal(CommonUtils.encodeBase64URL("hello", false), "aGVsbG8"); + + run_next_test(); +}); diff --git a/services/common/tests/unit/test_utils_ensureMillisecondsTimestamp.js b/services/common/tests/unit/test_utils_ensureMillisecondsTimestamp.js new file mode 100644 index 0000000000..f334364b2d --- /dev/null +++ b/services/common/tests/unit/test_utils_ensureMillisecondsTimestamp.js @@ -0,0 +1,40 @@ +/* Any copyright is dedicated to the Public Domain. 
+ http://creativecommons.org/publicdomain/zero/1.0/ */ + +function run_test() { + Assert.equal(null, CommonUtils.ensureMillisecondsTimestamp(null)); + Assert.equal(null, CommonUtils.ensureMillisecondsTimestamp(0)); + Assert.equal(null, CommonUtils.ensureMillisecondsTimestamp("0")); + Assert.equal(null, CommonUtils.ensureMillisecondsTimestamp("000")); + + Assert.equal( + null, + CommonUtils.ensureMillisecondsTimestamp(999 * 10000000000) + ); + + do_check_throws(function err() { + CommonUtils.ensureMillisecondsTimestamp(-1); + }); + do_check_throws(function err() { + CommonUtils.ensureMillisecondsTimestamp(1); + }); + do_check_throws(function err() { + CommonUtils.ensureMillisecondsTimestamp(1.5); + }); + do_check_throws(function err() { + CommonUtils.ensureMillisecondsTimestamp(999 * 10000000000 + 0.5); + }); + + do_check_throws(function err() { + CommonUtils.ensureMillisecondsTimestamp("-1"); + }); + do_check_throws(function err() { + CommonUtils.ensureMillisecondsTimestamp("1"); + }); + do_check_throws(function err() { + CommonUtils.ensureMillisecondsTimestamp("1.5"); + }); + do_check_throws(function err() { + CommonUtils.ensureMillisecondsTimestamp("" + (999 * 10000000000 + 0.5)); + }); +} diff --git a/services/common/tests/unit/test_utils_makeURI.js b/services/common/tests/unit/test_utils_makeURI.js new file mode 100644 index 0000000000..d6d7e19db9 --- /dev/null +++ b/services/common/tests/unit/test_utils_makeURI.js @@ -0,0 +1,62 @@ +/* Any copyright is dedicated to the Public Domain. 
+ * http://creativecommons.org/publicdomain/zero/1.0/ */ + +_("Make sure uri strings are converted to nsIURIs"); + +function run_test() { + _test_makeURI(); +} + +function _test_makeURI() { + _("Check http uris"); + let uri1 = "http://mozillalabs.com/"; + Assert.equal(CommonUtils.makeURI(uri1).spec, uri1); + let uri2 = "http://www.mozillalabs.com/"; + Assert.equal(CommonUtils.makeURI(uri2).spec, uri2); + let uri3 = "http://mozillalabs.com/path"; + Assert.equal(CommonUtils.makeURI(uri3).spec, uri3); + let uri4 = "http://mozillalabs.com/multi/path"; + Assert.equal(CommonUtils.makeURI(uri4).spec, uri4); + let uri5 = "http://mozillalabs.com/?query"; + Assert.equal(CommonUtils.makeURI(uri5).spec, uri5); + let uri6 = "http://mozillalabs.com/#hash"; + Assert.equal(CommonUtils.makeURI(uri6).spec, uri6); + + _("Check https uris"); + let uris1 = "https://mozillalabs.com/"; + Assert.equal(CommonUtils.makeURI(uris1).spec, uris1); + let uris2 = "https://www.mozillalabs.com/"; + Assert.equal(CommonUtils.makeURI(uris2).spec, uris2); + let uris3 = "https://mozillalabs.com/path"; + Assert.equal(CommonUtils.makeURI(uris3).spec, uris3); + let uris4 = "https://mozillalabs.com/multi/path"; + Assert.equal(CommonUtils.makeURI(uris4).spec, uris4); + let uris5 = "https://mozillalabs.com/?query"; + Assert.equal(CommonUtils.makeURI(uris5).spec, uris5); + let uris6 = "https://mozillalabs.com/#hash"; + Assert.equal(CommonUtils.makeURI(uris6).spec, uris6); + + _("Check chrome uris"); + let uric1 = "chrome://browser/content/browser.xhtml"; + Assert.equal(CommonUtils.makeURI(uric1).spec, uric1); + let uric2 = "chrome://browser/skin/browser.css"; + Assert.equal(CommonUtils.makeURI(uric2).spec, uric2); + + _("Check about uris"); + let uria1 = "about:weave"; + Assert.equal(CommonUtils.makeURI(uria1).spec, uria1); + let uria2 = "about:weave/"; + Assert.equal(CommonUtils.makeURI(uria2).spec, uria2); + let uria3 = "about:weave/path"; + Assert.equal(CommonUtils.makeURI(uria3).spec, uria3); + let uria4 = 
"about:weave/multi/path"; + Assert.equal(CommonUtils.makeURI(uria4).spec, uria4); + let uria5 = "about:weave/?query"; + Assert.equal(CommonUtils.makeURI(uria5).spec, uria5); + let uria6 = "about:weave/#hash"; + Assert.equal(CommonUtils.makeURI(uria6).spec, uria6); + + _("Invalid uris are undefined"); + Assert.equal(CommonUtils.makeURI("mozillalabs.com"), undefined); + Assert.equal(CommonUtils.makeURI("this is a test"), undefined); +} diff --git a/services/common/tests/unit/test_utils_namedTimer.js b/services/common/tests/unit/test_utils_namedTimer.js new file mode 100644 index 0000000000..47ac0b9dc1 --- /dev/null +++ b/services/common/tests/unit/test_utils_namedTimer.js @@ -0,0 +1,73 @@ +/* Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ */ + +add_test(function test_required_args() { + try { + CommonUtils.namedTimer(function callback() { + do_throw("Shouldn't fire."); + }, 0); + do_throw("Should have thrown!"); + } catch (ex) { + run_next_test(); + } +}); + +add_test(function test_simple() { + _("Test basic properties of CommonUtils.namedTimer."); + + const delay = 200; + let that = {}; + let t0 = Date.now(); + CommonUtils.namedTimer( + function callback(timer) { + Assert.equal(this, that); + Assert.equal(this._zetimer, null); + Assert.ok(timer instanceof Ci.nsITimer); + // Difference should be ~delay, but hard to predict on all platforms, + // particularly Windows XP. + Assert.ok(Date.now() > t0); + run_next_test(); + }, + delay, + that, + "_zetimer" + ); +}); + +add_test(function test_delay() { + _("Test delaying a timer that hasn't fired yet."); + + const delay = 100; + let that = {}; + let t0 = Date.now(); + function callback(timer) { + // Difference should be ~2*delay, but hard to predict on all platforms, + // particularly Windows XP. 
+ Assert.ok(Date.now() - t0 > delay); + run_next_test(); + } + CommonUtils.namedTimer(callback, delay, that, "_zetimer"); + CommonUtils.namedTimer(callback, 2 * delay, that, "_zetimer"); + run_next_test(); +}); + +add_test(function test_clear() { + _("Test clearing a timer that hasn't fired yet."); + + const delay = 0; + let that = {}; + CommonUtils.namedTimer( + function callback(timer) { + do_throw("Shouldn't fire!"); + }, + delay, + that, + "_zetimer" + ); + + that._zetimer.clear(); + Assert.equal(that._zetimer, null); + CommonUtils.nextTick(run_next_test); + + run_next_test(); +}); diff --git a/services/common/tests/unit/test_utils_sets.js b/services/common/tests/unit/test_utils_sets.js new file mode 100644 index 0000000000..c15d48528f --- /dev/null +++ b/services/common/tests/unit/test_utils_sets.js @@ -0,0 +1,66 @@ +/* Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/publicdomain/zero/1.0/ */ + +"use strict"; + +const EMPTY = new Set(); +const A = new Set(["a"]); +const ABC = new Set(["a", "b", "c"]); +const ABCD = new Set(["a", "b", "c", "d"]); +const BC = new Set(["b", "c"]); +const BCD = new Set(["b", "c", "d"]); +const FGH = new Set(["f", "g", "h"]); +const BCDFGH = new Set(["b", "c", "d", "f", "g", "h"]); + +var union = CommonUtils.union; +var difference = CommonUtils.difference; +var intersection = CommonUtils.intersection; +var setEqual = CommonUtils.setEqual; + +function do_check_setEqual(a, b) { + Assert.ok(setEqual(a, b)); +} + +function do_check_not_setEqual(a, b) { + Assert.ok(!setEqual(a, b)); +} + +add_test(function test_setEqual() { + do_check_setEqual(EMPTY, EMPTY); + do_check_setEqual(EMPTY, new Set()); + do_check_setEqual(A, A); + do_check_setEqual(A, new Set(["a"])); + do_check_setEqual(new Set(["a"]), A); + do_check_not_setEqual(A, EMPTY); + do_check_not_setEqual(EMPTY, A); + do_check_not_setEqual(ABC, A); + run_next_test(); +}); + +add_test(function test_union() { + do_check_setEqual(EMPTY, union(EMPTY, 
EMPTY)); + do_check_setEqual(ABC, union(EMPTY, ABC)); + do_check_setEqual(ABC, union(ABC, ABC)); + do_check_setEqual(ABCD, union(ABC, BCD)); + do_check_setEqual(ABCD, union(BCD, ABC)); + do_check_setEqual(BCDFGH, union(BCD, FGH)); + run_next_test(); +}); + +add_test(function test_difference() { + do_check_setEqual(EMPTY, difference(EMPTY, EMPTY)); + do_check_setEqual(EMPTY, difference(EMPTY, A)); + do_check_setEqual(EMPTY, difference(A, A)); + do_check_setEqual(ABC, difference(ABC, EMPTY)); + do_check_setEqual(ABC, difference(ABC, FGH)); + do_check_setEqual(A, difference(ABC, BCD)); + run_next_test(); +}); + +add_test(function test_intersection() { + do_check_setEqual(EMPTY, intersection(EMPTY, EMPTY)); + do_check_setEqual(EMPTY, intersection(ABC, EMPTY)); + do_check_setEqual(EMPTY, intersection(ABC, FGH)); + do_check_setEqual(BC, intersection(ABC, BCD)); + run_next_test(); +}); diff --git a/services/common/tests/unit/test_utils_utf8.js b/services/common/tests/unit/test_utils_utf8.js new file mode 100644 index 0000000000..aa075873b9 --- /dev/null +++ b/services/common/tests/unit/test_utils_utf8.js @@ -0,0 +1,9 @@ +/* Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ */ + +function run_test() { + let str = "Umlaute: \u00FC \u00E4\n"; // Umlaute: ü ä + let encoded = CommonUtils.encodeUTF8(str); + let decoded = CommonUtils.decodeUTF8(encoded); + Assert.equal(decoded, str); +} diff --git a/services/common/tests/unit/test_utils_uuid.js b/services/common/tests/unit/test_utils_uuid.js new file mode 100644 index 0000000000..33b407e92d --- /dev/null +++ b/services/common/tests/unit/test_utils_uuid.js @@ -0,0 +1,12 @@ +/* Any copyright is dedicated to the Public Domain. 
+ * http://creativecommons.org/publicdomain/zero/1.0/ */ + +"use strict"; + +function run_test() { + let uuid = CommonUtils.generateUUID(); + Assert.equal(uuid.length, 36); + Assert.equal(uuid[8], "-"); + + run_next_test(); +} diff --git a/services/common/tests/unit/xpcshell.toml b/services/common/tests/unit/xpcshell.toml new file mode 100644 index 0000000000..e4035f66b2 --- /dev/null +++ b/services/common/tests/unit/xpcshell.toml @@ -0,0 +1,63 @@ +[DEFAULT] +head = "head_global.js head_helpers.js head_http.js" +firefox-appdir = "browser" +support-files = ["test_storage_adapter/**"] + +# Test load modules first so syntax failures are caught early. + +["test_async_chain.js"] + +["test_async_foreach.js"] + +["test_hawkclient.js"] +skip-if = ["os == 'android'"] + +["test_hawkrequest.js"] +skip-if = ["os == 'android'"] + +["test_kinto.js"] +tags = "blocklist" + +["test_load_modules.js"] + +["test_logmanager.js"] + +["test_observers.js"] + +["test_restrequest.js"] + +["test_storage_adapter.js"] +tags = "remote-settings blocklist" + +["test_storage_adapter_shutdown.js"] +tags = "remote-settings blocklist" + +["test_tokenauthenticatedrequest.js"] + +["test_tokenserverclient.js"] +skip-if = ["os == 'android'"] + +["test_uptake_telemetry.js"] +tags = "remote-settings" + +["test_utils_atob.js"] + +["test_utils_convert_string.js"] + +["test_utils_dateprefs.js"] + +["test_utils_encodeBase32.js"] + +["test_utils_encodeBase64URL.js"] + +["test_utils_ensureMillisecondsTimestamp.js"] + +["test_utils_makeURI.js"] + +["test_utils_namedTimer.js"] + +["test_utils_sets.js"] + +["test_utils_utf8.js"] + +["test_utils_uuid.js"] diff --git a/services/common/tokenserverclient.sys.mjs b/services/common/tokenserverclient.sys.mjs new file mode 100644 index 0000000000..c23e3d779c --- /dev/null +++ b/services/common/tokenserverclient.sys.mjs @@ -0,0 +1,392 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { Log } from "resource://gre/modules/Log.sys.mjs"; + +import { RESTRequest } from "resource://services-common/rest.sys.mjs"; +import { Observers } from "resource://services-common/observers.sys.mjs"; + +const PREF_LOG_LEVEL = "services.common.log.logger.tokenserverclient"; + +/** + * Represents a TokenServerClient error that occurred on the client. + * + * This is the base type for all errors raised by client operations. + * + * @param message + * (string) Error message. + */ +export function TokenServerClientError(message) { + this.name = "TokenServerClientError"; + this.message = message || "Client error."; + // Without explicitly setting .stack, all stacks from these errors will point + // to the "new Error()" call a few lines down, which isn't helpful. + this.stack = Error().stack; +} + +TokenServerClientError.prototype = new Error(); +TokenServerClientError.prototype.constructor = TokenServerClientError; +TokenServerClientError.prototype._toStringFields = function () { + return { message: this.message }; +}; +TokenServerClientError.prototype.toString = function () { + return this.name + "(" + JSON.stringify(this._toStringFields()) + ")"; +}; +TokenServerClientError.prototype.toJSON = function () { + let result = this._toStringFields(); + result.name = this.name; + return result; +}; + +/** + * Represents a TokenServerClient error that occurred in the network layer. + * + * @param error + * The underlying error thrown by the network layer. 
+ */ +export function TokenServerClientNetworkError(error) { + this.name = "TokenServerClientNetworkError"; + this.error = error; + this.stack = Error().stack; +} + +TokenServerClientNetworkError.prototype = new TokenServerClientError(); +TokenServerClientNetworkError.prototype.constructor = + TokenServerClientNetworkError; +TokenServerClientNetworkError.prototype._toStringFields = function () { + return { error: this.error }; +}; + +/** + * Represents a TokenServerClient error that occurred on the server. + * + * This type will be encountered for all non-200 response codes from the + * server. The type of error is strongly enumerated and is stored in the + * `cause` property. This property can have the following string values: + * + * invalid-credentials -- A token could not be obtained because + * the credentials presented by the client were invalid. + * + * unknown-service -- The requested service was not found. + * + * malformed-request -- The server rejected the request because it + * was invalid. If you see this, code in this file is likely wrong. + * + * malformed-response -- The response from the server was not what was + * expected. + * + * general -- A general server error has occurred. Clients should + * interpret this as an opaque failure. + * + * @param message + * (string) Error message. + */ +export function TokenServerClientServerError(message, cause = "general") { + this.now = new Date().toISOString(); // may be useful to diagnose time-skew issues. 
+ this.name = "TokenServerClientServerError"; + this.message = message || "Server error."; + this.cause = cause; + this.stack = Error().stack; +} + +TokenServerClientServerError.prototype = new TokenServerClientError(); +TokenServerClientServerError.prototype.constructor = + TokenServerClientServerError; + +TokenServerClientServerError.prototype._toStringFields = function () { + let fields = { + now: this.now, + message: this.message, + cause: this.cause, + }; + if (this.response) { + fields.response_body = this.response.body; + fields.response_headers = this.response.headers; + fields.response_status = this.response.status; + } + return fields; +}; + +/** + * Represents a client to the Token Server. + * + * http://docs.services.mozilla.com/token/index.html + * + * The Token Server was designed to support obtaining tokens for arbitrary apps by + * constructing URI paths of the form <app>/<app_version>. In practice this was + * never used and it only supports an <app> value of `sync`, and the API presented + * here reflects that. + * + * Areas to Improve: + * + * - The server sends a JSON response on error. The client does not currently + * parse this. It might be convenient if it did. + * - Currently most non-200 status codes are rolled into one error type. It + * might be helpful if callers had a richer API that communicated who was + * at fault (e.g. differentiating a 503 from a 401). + */ +export function TokenServerClient() { + this._log = Log.repository.getLogger("Services.Common.TokenServerClient"); + this._log.manageLevelFromPref(PREF_LOG_LEVEL); +} + +TokenServerClient.prototype = { + /** + * Logger instance. + */ + _log: null, + + /** + * Obtain a token from a provided OAuth token against a specific URL. + * + * This asynchronously obtains the token. + * It returns a Promise that resolves or rejects: + * + * Rejects with: + * (TokenServerClientError) If no token could be obtained, this + * will be a TokenServerClientError instance describing why. 
The + * type seen defines the type of error encountered. If an HTTP response + * was seen, a RESTResponse instance will be stored in the `response` + * property of this object. If there was no error and a token is + * available, this will be null. + * + * Resolves with: + * (map) On success, this will be a map containing the results from + * the server. If there was an error, this will be null. The map has the + * following properties: + * + * id (string) HTTP MAC public key identifier. + * key (string) HTTP MAC shared symmetric key. + * endpoint (string) URL where service can be connected to. + * uid (string) user ID for requested service. + * duration (string) the validity duration of the issued token. + * + * Example Usage + * ------------- + * + * let client = new TokenServerClient(); + * let access_token = getOAuthAccessTokenFromSomewhere(); + * let url = "https://token.services.mozilla.com/1.0/sync/2.0"; + * + * try { + * const result = await client.getTokenUsingOAuth(url, access_token); + * let {id, key, uid, endpoint, duration} = result; + * // Do stuff with data and carry on. + * } catch (error) { + * // Handle errors. + * } + * Obtain a token from a provided OAuth token against a specific URL. + * + * @param url + * (string) URL to fetch token from. + * @param oauthToken + * (string) FxA OAuth Token to exchange token for. + * @param addHeaders + * (object) Extra headers for the request. + */ + async getTokenUsingOAuth(url, oauthToken, addHeaders = {}) { + this._log.debug("Beginning OAuth token exchange: " + url); + + if (!oauthToken) { + throw new TokenServerClientError("oauthToken argument is not valid."); + } + + return this._tokenServerExchangeRequest( + url, + `Bearer ${oauthToken}`, + addHeaders + ); + }, + + /** + * Performs the exchange request to the token server to + * produce a token based on the authorizationHeader input. + * + * @param url + * (string) URL to fetch token from. 
+ * @param authorizationHeader + * (string) The auth header string that populates the 'Authorization' header. + * @param addHeaders + * (object) Extra headers for the request. + */ + async _tokenServerExchangeRequest(url, authorizationHeader, addHeaders = {}) { + if (!url) { + throw new TokenServerClientError("url argument is not valid."); + } + + if (!authorizationHeader) { + throw new TokenServerClientError( + "authorizationHeader argument is not valid." + ); + } + + let req = this.newRESTRequest(url); + req.setHeader("Accept", "application/json"); + req.setHeader("Authorization", authorizationHeader); + + for (let header in addHeaders) { + req.setHeader(header, addHeaders[header]); + } + let response; + try { + response = await req.get(); + } catch (err) { + throw new TokenServerClientNetworkError(err); + } + + try { + return this._processTokenResponse(response); + } catch (ex) { + if (ex instanceof TokenServerClientServerError) { + throw ex; + } + this._log.warn("Error processing token server response", ex); + let error = new TokenServerClientError(ex); + error.response = response; + throw error; + } + }, + + /** + * Handler to process token request responses. + * + * @param response + * RESTResponse from token HTTP request. + */ + _processTokenResponse(response) { + this._log.debug("Got token response: " + response.status); + + // Responses should *always* be JSON, even in the case of 4xx and 5xx + // errors. If we don't see JSON, the server is likely very unhappy. + let ct = response.headers["content-type"] || ""; + if (ct != "application/json" && !ct.startsWith("application/json;")) { + this._log.warn("Did not receive JSON response. 
Misconfigured server?"); + this._log.debug("Content-Type: " + ct); + this._log.debug("Body: " + response.body); + + let error = new TokenServerClientServerError( + "Non-JSON response.", + "malformed-response" + ); + error.response = response; + throw error; + } + + let result; + try { + result = JSON.parse(response.body); + } catch (ex) { + this._log.warn("Invalid JSON returned by server: " + response.body); + let error = new TokenServerClientServerError( + "Malformed JSON.", + "malformed-response" + ); + error.response = response; + throw error; + } + + // Any response status can have X-Backoff or X-Weave-Backoff headers. + this._maybeNotifyBackoff(response, "x-weave-backoff"); + this._maybeNotifyBackoff(response, "x-backoff"); + + // The service shouldn't have any 3xx, so we don't need to handle those. + if (response.status != 200) { + // We /should/ have a Cornice error report in the JSON. We log that to + // help with debugging. + if ("errors" in result) { + // This could throw, but this entire function is wrapped in a try. If + // the server is sending something not an array of objects, it has + // failed to keep its contract with us and there is little we can do. + for (let error of result.errors) { + this._log.info("Server-reported error: " + JSON.stringify(error)); + } + } + + let error = new TokenServerClientServerError(); + error.response = response; + + if (response.status == 400) { + error.message = "Malformed request."; + error.cause = "malformed-request"; + } else if (response.status == 401) { + // Cause can be invalid-credentials, invalid-timestamp, or + // invalid-generation. + error.message = "Authentication failed."; + error.cause = result.status; + } else if (response.status == 404) { + error.message = "Unknown service."; + error.cause = "unknown-service"; + } + + // A Retry-After header should theoretically only appear on a 503, but + // we'll look for it on any error response. 
+ this._maybeNotifyBackoff(response, "retry-after"); + + throw error; + } + + for (let k of ["id", "key", "api_endpoint", "uid", "duration"]) { + if (!(k in result)) { + let error = new TokenServerClientServerError( + "Expected key not present in result: " + k + ); + error.cause = "malformed-response"; + error.response = response; + throw error; + } + } + + this._log.debug("Successful token response"); + return { + id: result.id, + key: result.key, + endpoint: result.api_endpoint, + uid: result.uid, + duration: result.duration, + hashed_fxa_uid: result.hashed_fxa_uid, + node_type: result.node_type, + }; + }, + + /* + * The prefix used for all notifications sent by this module. This + * allows the handler of notifications to be sure they are handling + * notifications for the service they expect. + * + * If not set, no notifications will be sent. + */ + observerPrefix: null, + + // Given an optional header value, notify that a backoff has been requested. + _maybeNotifyBackoff(response, headerName) { + if (!this.observerPrefix) { + return; + } + let headerVal = response.headers[headerName]; + if (!headerVal) { + return; + } + let backoffInterval; + try { + backoffInterval = parseInt(headerVal, 10); + } catch (ex) { + this._log.error( + "TokenServer response had invalid backoff value in '" + + headerName + + "' header: " + + headerVal + ); + return; + } + Observers.notify( + this.observerPrefix + ":backoff:interval", + backoffInterval + ); + }, + + // override points for testing. + newRESTRequest(url) { + return new RESTRequest(url); + }, +}; diff --git a/services/common/uptake-telemetry.sys.mjs b/services/common/uptake-telemetry.sys.mjs new file mode 100644 index 0000000000..9b456b137f --- /dev/null +++ b/services/common/uptake-telemetry.sys.mjs @@ -0,0 +1,193 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs"; +import { AppConstants } from "resource://gre/modules/AppConstants.sys.mjs"; + +const lazy = {}; +ChromeUtils.defineESModuleGetters(lazy, { + ClientID: "resource://gre/modules/ClientID.sys.mjs", +}); + +ChromeUtils.defineLazyGetter(lazy, "CryptoHash", () => { + return Components.Constructor( + "@mozilla.org/security/hash;1", + "nsICryptoHash", + "initWithString" + ); +}); + +XPCOMUtils.defineLazyPreferenceGetter( + lazy, + "gSampleRate", + "services.common.uptake.sampleRate" +); + +// Telemetry events id (see Events.yaml). +const TELEMETRY_EVENTS_ID = "uptake.remotecontent.result"; + +/** + * A wrapper around certain low-level operations that can be substituted for testing. + */ +export var Policy = { + _clientIDHash: null, + + getClientID() { + return lazy.ClientID.getClientID(); + }, + + /** + * Compute an integer in the range [0, 100) using a hash of the + * client ID. + * + * This is useful for sampling clients when trying to report + * telemetry only for a sample of clients. + */ + async getClientIDHash() { + if (this._clientIDHash === null) { + this._clientIDHash = this._doComputeClientIDHash(); + } + return this._clientIDHash; + }, + + async _doComputeClientIDHash() { + const clientID = await this.getClientID(); + let byteArr = new TextEncoder().encode(clientID); + let hash = new lazy.CryptoHash("sha256"); + hash.update(byteArr, byteArr.length); + const bytes = hash.finish(false); + let rem = 0; + for (let i = 0, len = bytes.length; i < len; i++) { + rem = ((rem << 8) + (bytes[i].charCodeAt(0) & 0xff)) % 100; + } + return rem; + }, + + getChannel() { + return AppConstants.MOZ_UPDATE_CHANNEL; + }, +}; + +/** + * A Telemetry helper to report uptake of remote content. + */ +export class UptakeTelemetry { + /** + * Supported uptake statuses: + * + * - `UP_TO_DATE`: Local content was already up-to-date with remote content. + * - `SUCCESS`: Local content was updated successfully. 
+ * - `BACKOFF`: Remote server asked clients to backoff. + * - `PARSE_ERROR`: Parsing server response has failed. + * - `CONTENT_ERROR`: Server response has unexpected content. + * - `PREF_DISABLED`: Update is disabled in user preferences. + * - `SIGNATURE_ERROR`: Signature verification after diff-based sync has failed. + * - `SIGNATURE_RETRY_ERROR`: Signature verification after full fetch has failed. + * - `CONFLICT_ERROR`: Some remote changes are in conflict with local changes. + * - `CORRUPTION_ERROR`: Error related to corrupted local data. + * - `SYNC_ERROR`: Synchronization of remote changes has failed. + * - `APPLY_ERROR`: Application of changes locally has failed. + * - `SERVER_ERROR`: Server failed to respond. + * - `CERTIFICATE_ERROR`: Server certificate verification has failed. + * - `DOWNLOAD_ERROR`: Data could not be fully retrieved. + * - `TIMEOUT_ERROR`: Server response has timed out. + * - `NETWORK_ERROR`: Communication with server has failed. + * - `NETWORK_OFFLINE_ERROR`: Network not available. + * - `SHUTDOWN_ERROR`: Error occuring during shutdown. + * - `UNKNOWN_ERROR`: Uncategorized error. + * - `CLEANUP_ERROR`: Clean-up of temporary files has failed. + * - `SYNC_BROKEN_ERROR`: Synchronization is broken. + * - `CUSTOM_1_ERROR`: Update source specific error #1. + * - `CUSTOM_2_ERROR`: Update source specific error #2. + * - `CUSTOM_3_ERROR`: Update source specific error #3. + * - `CUSTOM_4_ERROR`: Update source specific error #4. + * - `CUSTOM_5_ERROR`: Update source specific error #5. 
+ * + * @type {Object} + */ + static get STATUS() { + return { + UP_TO_DATE: "up_to_date", + SUCCESS: "success", + BACKOFF: "backoff", + PARSE_ERROR: "parse_error", + CONTENT_ERROR: "content_error", + PREF_DISABLED: "pref_disabled", + SIGNATURE_ERROR: "sign_error", + SIGNATURE_RETRY_ERROR: "sign_retry_error", + CONFLICT_ERROR: "conflict_error", + CORRUPTION_ERROR: "corruption_error", + SYNC_ERROR: "sync_error", + APPLY_ERROR: "apply_error", + SERVER_ERROR: "server_error", + CERTIFICATE_ERROR: "certificate_error", + DOWNLOAD_ERROR: "download_error", + TIMEOUT_ERROR: "timeout_error", + NETWORK_ERROR: "network_error", + NETWORK_OFFLINE_ERROR: "offline_error", + SHUTDOWN_ERROR: "shutdown_error", + UNKNOWN_ERROR: "unknown_error", + CLEANUP_ERROR: "cleanup_error", + SYNC_BROKEN_ERROR: "sync_broken_error", + CUSTOM_1_ERROR: "custom_1_error", + CUSTOM_2_ERROR: "custom_2_error", + CUSTOM_3_ERROR: "custom_3_error", + CUSTOM_4_ERROR: "custom_4_error", + CUSTOM_5_ERROR: "custom_5_error", + }; + } + + static get Policy() { + return Policy; + } + + /** + * Reports the uptake status for the specified source. + * + * @param {string} component the component reporting the uptake (eg. "normandy"). + * @param {string} status the uptake status (eg. "network_error") + * @param {Object} extra extra values to report + * @param {string} extra.source the update source (eg. "recipe-42"). + * @param {string} extra.trigger what triggered the polling/fetching (eg. "broadcast", "timer"). + * @param {int} extra.age age of pulled data in seconds + */ + static async report(component, status, extra = {}) { + const { source } = extra; + + if (!source) { + throw new Error("`source` value is mandatory."); + } + + if (!Object.values(UptakeTelemetry.STATUS).includes(status)) { + throw new Error(`Unknown status '${status}'`); + } + + // Report event for real-time monitoring. See Events.yaml for registration. + // Contrary to histograms, Telemetry Events are not enabled by default. 
+ // Enable them on first call to `report()`. + if (!this._eventsEnabled) { + Services.telemetry.setEventRecordingEnabled(TELEMETRY_EVENTS_ID, true); + this._eventsEnabled = true; + } + + const hash = await UptakeTelemetry.Policy.getClientIDHash(); + const channel = UptakeTelemetry.Policy.getChannel(); + const shouldSendEvent = + !["release", "esr"].includes(channel) || hash < lazy.gSampleRate; + if (shouldSendEvent) { + // The Event API requires `extra` values to be of type string. Force it! + const extraStr = Object.keys(extra).reduce((acc, k) => { + acc[k] = extra[k].toString(); + return acc; + }, {}); + Services.telemetry.recordEvent( + TELEMETRY_EVENTS_ID, + "uptake", + component, + status, + extraStr + ); + } + } +} diff --git a/services/common/utils.sys.mjs b/services/common/utils.sys.mjs new file mode 100644 index 0000000000..3b427c0ad4 --- /dev/null +++ b/services/common/utils.sys.mjs @@ -0,0 +1,693 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { Log } from "resource://gre/modules/Log.sys.mjs"; + +export var CommonUtils = { + /* + * Set manipulation methods. These should be lifted into toolkit, or added to + * `Set` itself. + */ + + /** + * Return elements of `a` or `b`. + */ + union(a, b) { + let out = new Set(a); + for (let x of b) { + out.add(x); + } + return out; + }, + + /** + * Return elements of `a` that are not present in `b`. + */ + difference(a, b) { + let out = new Set(a); + for (let x of b) { + out.delete(x); + } + return out; + }, + + /** + * Return elements of `a` that are also in `b`. + */ + intersection(a, b) { + let out = new Set(); + for (let x of a) { + if (b.has(x)) { + out.add(x); + } + } + return out; + }, + + /** + * Return true if `a` and `b` are the same size, and + * every element of `a` is in `b`. 
+ */ + setEqual(a, b) { + if (a.size != b.size) { + return false; + } + for (let x of a) { + if (!b.has(x)) { + return false; + } + } + return true; + }, + + /** + * Checks elements in two arrays for equality, as determined by the `===` + * operator. This function does not perform a deep comparison; see Sync's + * `Util.deepEquals` for that. + */ + arrayEqual(a, b) { + if (a.length !== b.length) { + return false; + } + for (let i = 0; i < a.length; i++) { + if (a[i] !== b[i]) { + return false; + } + } + return true; + }, + + /** + * Encode byte string as base64URL (RFC 4648). + * + * @param bytes + * (string) Raw byte string to encode. + * @param pad + * (bool) Whether to include padding characters (=). Defaults + * to true for historical reasons. + */ + encodeBase64URL: function encodeBase64URL(bytes, pad = true) { + let s = btoa(bytes).replace(/\+/g, "-").replace(/\//g, "_"); + + if (!pad) { + return s.replace(/=+$/, ""); + } + + return s; + }, + + /** + * Create a nsIURI instance from a string. + */ + makeURI: function makeURI(URIString) { + if (!URIString) { + return null; + } + try { + return Services.io.newURI(URIString); + } catch (e) { + let log = Log.repository.getLogger("Common.Utils"); + log.debug("Could not create URI", e); + return null; + } + }, + + /** + * Execute a function on the next event loop tick. + * + * @param callback + * Function to invoke. + * @param thisObj [optional] + * Object to bind the callback to. + */ + nextTick: function nextTick(callback, thisObj) { + if (thisObj) { + callback = callback.bind(thisObj); + } + Services.tm.dispatchToMainThread(callback); + }, + + /** + * Return a timer that is scheduled to call the callback after waiting the + * provided time or as soon as possible. The timer will be set as a property + * of the provided object with the given timer name. 
+   */
+  namedTimer: function namedTimer(callback, wait, thisObj, name) {
+    if (!thisObj || !name) {
+      throw new Error(
+        "You must provide both an object and a property name for the timer!"
+      );
+    }
+
+    // Delay an existing timer if it exists
+    if (name in thisObj && thisObj[name] instanceof Ci.nsITimer) {
+      thisObj[name].delay = wait;
+      return thisObj[name];
+    }
+
+    // Create a special timer that we can add extra properties
+    let timer = Object.create(
+      Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer)
+    );
+
+    // Provide an easy way to clear out the timer
+    timer.clear = function () {
+      thisObj[name] = null;
+      timer.cancel();
+    };
+
+    // Initialize the timer with a smart callback
+    timer.initWithCallback(
+      {
+        notify: function notify() {
+          // Clear out the timer once it's been triggered
+          timer.clear();
+          callback.call(thisObj, timer);
+        },
+      },
+      wait,
+      timer.TYPE_ONE_SHOT
+    );
+
+    return (thisObj[name] = timer);
+  },
+
+  encodeUTF8: function encodeUTF8(str) {
+    try {
+      str = this._utf8Converter.ConvertFromUnicode(str);
+      return str + this._utf8Converter.Finish();
+    } catch (ex) {
+      return null;
+    }
+  },
+
+  decodeUTF8: function decodeUTF8(str) {
+    try {
+      str = this._utf8Converter.ConvertToUnicode(str);
+      return str + this._utf8Converter.Finish();
+    } catch (ex) {
+      return null;
+    }
+  },
+
+  byteArrayToString: function byteArrayToString(bytes) {
+    return bytes.map(byte => String.fromCharCode(byte)).join("");
+  },
+
+  stringToByteArray: function stringToByteArray(bytesString) {
+    return Array.prototype.slice.call(bytesString).map(c => c.charCodeAt(0));
+  },
+
+  // A lot of Util methods work with byte strings instead of ArrayBuffers.
+  // A patch should address this problem, but in the meantime let's provide
+  // helper methods to convert byte strings to Uint8Array.
+ byteStringToArrayBuffer(byteString) { + if (byteString === undefined) { + return new Uint8Array(); + } + const bytes = new Uint8Array(byteString.length); + for (let i = 0; i < byteString.length; ++i) { + bytes[i] = byteString.charCodeAt(i) & 0xff; + } + return bytes; + }, + + arrayBufferToByteString(buffer) { + return CommonUtils.byteArrayToString([...buffer]); + }, + + bufferToHex(buffer) { + return Array.prototype.map + .call(buffer, x => ("00" + x.toString(16)).slice(-2)) + .join(""); + }, + + bytesAsHex: function bytesAsHex(bytes) { + let s = ""; + for (let i = 0, len = bytes.length; i < len; i++) { + let c = (bytes[i].charCodeAt(0) & 0xff).toString(16); + if (c.length == 1) { + c = "0" + c; + } + s += c; + } + return s; + }, + + stringAsHex: function stringAsHex(str) { + return CommonUtils.bytesAsHex(CommonUtils.encodeUTF8(str)); + }, + + stringToBytes: function stringToBytes(str) { + return CommonUtils.hexToBytes(CommonUtils.stringAsHex(str)); + }, + + hexToBytes: function hexToBytes(str) { + let bytes = []; + for (let i = 0; i < str.length - 1; i += 2) { + bytes.push(parseInt(str.substr(i, 2), 16)); + } + return String.fromCharCode.apply(String, bytes); + }, + + hexToArrayBuffer(str) { + const octString = CommonUtils.hexToBytes(str); + return CommonUtils.byteStringToArrayBuffer(octString); + }, + + hexAsString: function hexAsString(hex) { + return CommonUtils.decodeUTF8(CommonUtils.hexToBytes(hex)); + }, + + base64urlToHex(b64str) { + return CommonUtils.bufferToHex( + new Uint8Array(ChromeUtils.base64URLDecode(b64str, { padding: "reject" })) + ); + }, + + /** + * Base32 encode (RFC 4648) a string + */ + encodeBase32: function encodeBase32(bytes) { + const key = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"; + let leftover = bytes.length % 5; + + // Pad the last quantum with zeros so the length is a multiple of 5. + if (leftover) { + for (let i = leftover; i < 5; i++) { + bytes += "\0"; + } + } + + // Chop the string into quanta of 5 bytes (40 bits). 
Each quantum + // is turned into 8 characters from the 32 character base. + let ret = ""; + for (let i = 0; i < bytes.length; i += 5) { + let c = Array.prototype.slice + .call(bytes.slice(i, i + 5)) + .map(byte => byte.charCodeAt(0)); + ret += + key[c[0] >> 3] + + key[((c[0] << 2) & 0x1f) | (c[1] >> 6)] + + key[(c[1] >> 1) & 0x1f] + + key[((c[1] << 4) & 0x1f) | (c[2] >> 4)] + + key[((c[2] << 1) & 0x1f) | (c[3] >> 7)] + + key[(c[3] >> 2) & 0x1f] + + key[((c[3] << 3) & 0x1f) | (c[4] >> 5)] + + key[c[4] & 0x1f]; + } + + switch (leftover) { + case 1: + return ret.slice(0, -6) + "======"; + case 2: + return ret.slice(0, -4) + "===="; + case 3: + return ret.slice(0, -3) + "==="; + case 4: + return ret.slice(0, -1) + "="; + default: + return ret; + } + }, + + /** + * Base32 decode (RFC 4648) a string. + */ + decodeBase32: function decodeBase32(str) { + const key = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"; + + let padChar = str.indexOf("="); + let chars = padChar == -1 ? str.length : padChar; + let bytes = Math.floor((chars * 5) / 8); + let blocks = Math.ceil(chars / 8); + + // Process a chunk of 5 bytes / 8 characters. + // The processing of this is known in advance, + // so avoid arithmetic! + function processBlock(ret, cOffset, rOffset) { + let c, val; + + // N.B., this relies on + // undefined | foo == foo. + function accumulate(val) { + ret[rOffset] |= val; + } + + function advance() { + c = str[cOffset++]; + if (!c || c == "" || c == "=") { + // Easier than range checking. + throw new Error("Done"); + } // Will be caught far away. + val = key.indexOf(c); + if (val == -1) { + throw new Error(`Unknown character in base32: ${c}`); + } + } + + // Handle a left shift, restricted to bytes. 
+ function left(octet, shift) { + return (octet << shift) & 0xff; + } + + advance(); + accumulate(left(val, 3)); + advance(); + accumulate(val >> 2); + ++rOffset; + accumulate(left(val, 6)); + advance(); + accumulate(left(val, 1)); + advance(); + accumulate(val >> 4); + ++rOffset; + accumulate(left(val, 4)); + advance(); + accumulate(val >> 1); + ++rOffset; + accumulate(left(val, 7)); + advance(); + accumulate(left(val, 2)); + advance(); + accumulate(val >> 3); + ++rOffset; + accumulate(left(val, 5)); + advance(); + accumulate(val); + ++rOffset; + } + + // Our output. Define to be explicit (and maybe the compiler will be smart). + let ret = new Array(bytes); + let i = 0; + let cOff = 0; + let rOff = 0; + + for (; i < blocks; ++i) { + try { + processBlock(ret, cOff, rOff); + } catch (ex) { + // Handle the detection of padding. + if (ex.message == "Done") { + break; + } + throw ex; + } + cOff += 8; + rOff += 5; + } + + // Slice in case our shift overflowed to the right. + return CommonUtils.byteArrayToString(ret.slice(0, bytes)); + }, + + /** + * Trim excess padding from a Base64 string and atob(). + * + * See bug 562431 comment 4. + */ + safeAtoB: function safeAtoB(b64) { + let len = b64.length; + let over = len % 4; + return over ? atob(b64.substr(0, len - over)) : atob(b64); + }, + + /** + * Ensure that the specified value is defined in integer milliseconds since + * UNIX epoch. + * + * This throws an error if the value is not an integer, is negative, or looks + * like seconds, not milliseconds. + * + * If the value is null or 0, no exception is raised. + * + * @param value + * Value to validate. + */ + ensureMillisecondsTimestamp: function ensureMillisecondsTimestamp(value) { + if (!value) { + return; + } + + if (!/^[0-9]+$/.test(value)) { + throw new Error("Timestamp value is not a positive integer: " + value); + } + + let intValue = parseInt(value, 10); + + if (!intValue) { + return; + } + + // Catch what looks like seconds, not milliseconds. 
+ if (intValue < 10000000000) { + throw new Error("Timestamp appears to be in seconds: " + intValue); + } + }, + + /** + * Read bytes from an nsIInputStream into a string. + * + * @param stream + * (nsIInputStream) Stream to read from. + * @param count + * (number) Integer number of bytes to read. If not defined, or + * 0, all available input is read. + */ + readBytesFromInputStream: function readBytesFromInputStream(stream, count) { + let BinaryInputStream = Components.Constructor( + "@mozilla.org/binaryinputstream;1", + "nsIBinaryInputStream", + "setInputStream" + ); + if (!count) { + count = stream.available(); + } + + return new BinaryInputStream(stream).readBytes(count); + }, + + /** + * Generate a new UUID using nsIUUIDGenerator. + * + * Example value: "1e00a2e2-1570-443e-bf5e-000354124234" + * + * @return string A hex-formatted UUID string. + */ + generateUUID: function generateUUID() { + let uuid = Services.uuid.generateUUID().toString(); + + return uuid.substring(1, uuid.length - 1); + }, + + /** + * Obtain an epoch value from a preference. + * + * This reads a string preference and returns an integer. The string + * preference is expected to contain the integer milliseconds since epoch. + * For best results, only read preferences that have been saved with + * setDatePref(). + * + * We need to store times as strings because integer preferences are only + * 32 bits and likely overflow most dates. + * + * If the pref contains a non-integer value, the specified default value will + * be returned. + * + * @param branch + * (Preferences) Branch from which to retrieve preference. + * @param pref + * (string) The preference to read from. + * @param def + * (Number) The default value to use if the preference is not defined. + * @param log + * (Log.Logger) Logger to write warnings to. 
+ */ + getEpochPref: function getEpochPref(branch, pref, def = 0, log = null) { + if (!Number.isInteger(def)) { + throw new Error("Default value is not a number: " + def); + } + + let valueStr = branch.getStringPref(pref, null); + + if (valueStr !== null) { + let valueInt = parseInt(valueStr, 10); + if (Number.isNaN(valueInt)) { + if (log) { + log.warn( + "Preference value is not an integer. Using default. " + + pref + + "=" + + valueStr + + " -> " + + def + ); + } + + return def; + } + + return valueInt; + } + + return def; + }, + + /** + * Obtain a Date from a preference. + * + * This is a wrapper around getEpochPref. It converts the value to a Date + * instance and performs simple range checking. + * + * The range checking ensures the date is newer than the oldestYear + * parameter. + * + * @param branch + * (Preferences) Branch from which to read preference. + * @param pref + * (string) The preference from which to read. + * @param def + * (Number) The default value (in milliseconds) if the preference is + * not defined or invalid. + * @param log + * (Log.Logger) Logger to write warnings to. + * @param oldestYear + * (Number) Oldest year to accept in read values. + */ + getDatePref: function getDatePref( + branch, + pref, + def = 0, + log = null, + oldestYear = 2010 + ) { + let valueInt = this.getEpochPref(branch, pref, def, log); + let date = new Date(valueInt); + + if (valueInt == def || date.getFullYear() >= oldestYear) { + return date; + } + + if (log) { + log.warn( + "Unexpected old date seen in pref. Returning default: " + + pref + + "=" + + date + + " -> " + + def + ); + } + + return new Date(def); + }, + + /** + * Store a Date in a preference. + * + * This is the opposite of getDatePref(). The same notes apply. + * + * If the range check fails, an Error will be thrown instead of a default + * value silently being used. + * + * @param branch + * (Preference) Branch from which to read preference. 
+ * @param pref + * (string) Name of preference to write to. + * @param date + * (Date) The value to save. + * @param oldestYear + * (Number) The oldest year to accept for values. + */ + setDatePref: function setDatePref(branch, pref, date, oldestYear = 2010) { + if (date.getFullYear() < oldestYear) { + throw new Error( + "Trying to set " + + pref + + " to a very old time: " + + date + + ". The current time is " + + new Date() + + ". Is the system clock wrong?" + ); + } + + branch.setStringPref(pref, "" + date.getTime()); + }, + + /** + * Convert a string between two encodings. + * + * Output is only guaranteed if the input stream is composed of octets. If + * the input string has characters with values larger than 255, data loss + * will occur. + * + * The returned string is guaranteed to consist of character codes no greater + * than 255. + * + * @param s + * (string) The source string to convert. + * @param source + * (string) The current encoding of the string. + * @param dest + * (string) The target encoding of the string. 
+ * + * @return string + */ + convertString: function convertString(s, source, dest) { + if (!s) { + throw new Error("Input string must be defined."); + } + + let is = Cc["@mozilla.org/io/string-input-stream;1"].createInstance( + Ci.nsIStringInputStream + ); + is.setData(s, s.length); + + let listener = Cc["@mozilla.org/network/stream-loader;1"].createInstance( + Ci.nsIStreamLoader + ); + + let result; + + listener.init({ + onStreamComplete: function onStreamComplete( + loader, + context, + status, + length, + data + ) { + result = String.fromCharCode.apply(this, data); + }, + }); + + let converter = this._converterService.asyncConvertData( + source, + dest, + listener, + null + ); + converter.onStartRequest(null, null); + converter.onDataAvailable(null, is, 0, s.length); + converter.onStopRequest(null, null, null); + + return result; + }, +}; + +ChromeUtils.defineLazyGetter(CommonUtils, "_utf8Converter", function () { + let converter = Cc[ + "@mozilla.org/intl/scriptableunicodeconverter" + ].createInstance(Ci.nsIScriptableUnicodeConverter); + converter.charset = "UTF-8"; + return converter; +}); + +ChromeUtils.defineLazyGetter(CommonUtils, "_converterService", function () { + return Cc["@mozilla.org/streamConverters;1"].getService( + Ci.nsIStreamConverterService + ); +}); |