summaryrefslogtreecommitdiffstats
path: root/third_party/rlbox
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--third_party/rlbox/README-mozilla10
-rw-r--r--third_party/rlbox/include/rlbox.hpp1349
-rw-r--r--third_party/rlbox/include/rlbox_app_pointer.hpp93
-rw-r--r--third_party/rlbox/include/rlbox_conversion.hpp273
-rw-r--r--third_party/rlbox/include/rlbox_dylib_sandbox.hpp314
-rw-r--r--third_party/rlbox/include/rlbox_helpers.hpp216
-rw-r--r--third_party/rlbox/include/rlbox_noop_sandbox.hpp254
-rw-r--r--third_party/rlbox/include/rlbox_policy_types.hpp387
-rw-r--r--third_party/rlbox/include/rlbox_range.hpp32
-rw-r--r--third_party/rlbox/include/rlbox_sandbox.hpp1094
-rw-r--r--third_party/rlbox/include/rlbox_stdlib.hpp329
-rw-r--r--third_party/rlbox/include/rlbox_stdlib_polyfill.hpp175
-rw-r--r--third_party/rlbox/include/rlbox_struct_support.hpp353
-rw-r--r--third_party/rlbox/include/rlbox_type_traits.hpp546
-rw-r--r--third_party/rlbox/include/rlbox_types.hpp87
-rw-r--r--third_party/rlbox/include/rlbox_unwrap.hpp25
-rw-r--r--third_party/rlbox/include/rlbox_wrapper_traits.hpp171
-rwxr-xr-xthird_party/rlbox/update.sh28
-rwxr-xr-xthird_party/rlbox_wasm2c_sandbox/LICENSE21
-rwxr-xr-xthird_party/rlbox_wasm2c_sandbox/c_src/wasm2c_sandbox_wrapper.c7
-rw-r--r--third_party/rlbox_wasm2c_sandbox/include/rlbox_wasm2c_sandbox.hpp971
-rw-r--r--third_party/rlbox_wasm2c_sandbox/include/rlbox_wasm2c_tls.hpp33
-rw-r--r--third_party/rlbox_wasm2c_sandbox/include/wasm2c_rt_mem.h46
-rw-r--r--third_party/rlbox_wasm2c_sandbox/include/wasm2c_rt_minwasi.h38
-rw-r--r--third_party/rlbox_wasm2c_sandbox/src/wasm2c_rt_mem.c454
-rw-r--r--third_party/rlbox_wasm2c_sandbox/src/wasm2c_rt_minwasi.c799
26 files changed, 8105 insertions, 0 deletions
diff --git a/third_party/rlbox/README-mozilla b/third_party/rlbox/README-mozilla
new file mode 100644
index 0000000000..fe6192b655
--- /dev/null
+++ b/third_party/rlbox/README-mozilla
@@ -0,0 +1,10 @@
+This directory contains the rlbox source from the upstream repo:
+https://github.com/PLSysSec/rlbox_sandboxing_api/
+
+Current version: [commit 358fb5bb02a326c631efaebdfb59b0df2ab9c602]
+
+UPDATING:
+
+This in-tree copy can be updated by running
+ sh update.sh
+from within the third_party/rlbox directory.
diff --git a/third_party/rlbox/include/rlbox.hpp b/third_party/rlbox/include/rlbox.hpp
new file mode 100644
index 0000000000..96daaac233
--- /dev/null
+++ b/third_party/rlbox/include/rlbox.hpp
@@ -0,0 +1,1349 @@
+#pragma once
+
+#include <array>
+#include <cstring>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "rlbox_app_pointer.hpp"
+#include "rlbox_conversion.hpp"
+#include "rlbox_helpers.hpp"
+#include "rlbox_policy_types.hpp"
+#include "rlbox_range.hpp"
+#include "rlbox_sandbox.hpp"
+#include "rlbox_stdlib.hpp"
+#include "rlbox_struct_support.hpp"
+#include "rlbox_type_traits.hpp"
+#include "rlbox_types.hpp"
+#include "rlbox_unwrap.hpp"
+#include "rlbox_wrapper_traits.hpp"
+
+namespace rlbox {
+
+template<template<typename, typename> typename T_Wrap,
+ typename T,
+ typename T_Sbx>
+class tainted_base_impl
+{
+ KEEP_CLASSES_FRIENDLY
+ KEEP_CAST_FRIENDLY
+
+public:
+ inline auto& impl() { return *static_cast<T_Wrap<T, T_Sbx>*>(this); }
+ inline auto& impl() const
+ {
+ return *static_cast<const T_Wrap<T, T_Sbx>*>(this);
+ }
+
+ /**
+ * @brief Unwrap a tainted value without verification. This is an unsafe
+ * operation and should be used with care.
+ */
+ inline auto UNSAFE_unverified() const { return impl().get_raw_value(); }
+ /**
+ * @brief Like UNSAFE_unverified, but get the underlying sandbox
+ * representation.
+ *
+ * @param sandbox Reference to sandbox.
+ *
+ * For the Wasm-based sandbox, this function additionally validates the
+ * unwrapped value against the machine model of the sandbox (LP32).
+ */
+ inline auto UNSAFE_sandboxed(rlbox_sandbox<T_Sbx>& sandbox) const
+ {
+ return impl().get_raw_sandbox_value(sandbox);
+ }
+
+ /**
+ * @brief Unwrap a tainted value without verification. This function should
+ * be used when unwrapping is safe.
+ *
+ * @param reason An explanation why the unverified unwrapping is safe.
+ */
+ template<size_t N>
+ inline auto unverified_safe_because(const char (&reason)[N]) const
+ {
+ RLBOX_UNUSED(reason);
+ static_assert(!std::is_pointer_v<T>,
+ "unverified_safe_because does not support pointers. Use "
+ "unverified_safe_pointer_because.");
+ return UNSAFE_unverified();
+ }
+
+ template<size_t N>
+ inline auto unverified_safe_pointer_because(size_t count,
+ const char (&reason)[N]) const
+ {
+ RLBOX_UNUSED(reason);
+
+ static_assert(std::is_pointer_v<T>, "Expected pointer type");
+ using T_Pointed = std::remove_pointer_t<T>;
+ if_constexpr_named(cond1, std::is_pointer_v<T_Pointed>)
+ {
+ rlbox_detail_static_fail_because(
+ cond1,
+ "There is no way to use unverified_safe_pointer_because for "
+ "'pointers to pointers' safely. Use copy_and_verify instead.");
+ return nullptr;
+ }
+
+ auto ret = UNSAFE_unverified();
+ if (ret != nullptr) {
+ size_t bytes = sizeof(T) * count;
+ detail::check_range_doesnt_cross_app_sbx_boundary<T_Sbx>(ret, bytes);
+ }
+ return ret;
+ }
+
+ inline auto INTERNAL_unverified_safe() const { return UNSAFE_unverified(); }
+
+#define BinaryOpValAndPtr(opSymbol) \
+ template<typename T_Rhs> \
+ inline constexpr auto operator opSymbol(const T_Rhs& rhs) \
+ const->tainted<decltype(std::declval<T>() opSymbol std::declval< \
+ detail::rlbox_remove_wrapper_t<T_Rhs>>()), \
+ T_Sbx> \
+ { \
+ static_assert(detail::is_basic_type_v<T>, \
+ "Operator " #opSymbol \
+ " only supported for primitive and pointer types"); \
+ \
+ auto raw_rhs = detail::unwrap_value(rhs); \
+ \
+ if constexpr (std::is_pointer_v<T>) { \
+ static_assert(std::is_integral_v<decltype(raw_rhs)>, \
+ "Can only operate on numeric types"); \
+ auto ptr = impl().get_raw_value(); \
+ detail::dynamic_check(ptr != nullptr, \
+ "Pointer arithmetic on a null pointer"); \
+ /* increment the target by size of the data structure */ \
+ auto target = \
+ reinterpret_cast<uintptr_t>(ptr) opSymbol raw_rhs * sizeof(*impl()); \
+ auto no_overflow = rlbox_sandbox<T_Sbx>::is_in_same_sandbox( \
+ reinterpret_cast<const void*>(ptr), \
+ reinterpret_cast<const void*>(target)); \
+ detail::dynamic_check( \
+ no_overflow, \
+ "Pointer arithmetic overflowed a pointer beyond sandbox memory"); \
+ \
+ return tainted<T, T_Sbx>::internal_factory(reinterpret_cast<T>(target)); \
+ } else { \
+ auto raw = impl().get_raw_value(); \
+ auto ret = raw opSymbol raw_rhs; \
+ using T_Ret = decltype(ret); \
+ return tainted<T_Ret, T_Sbx>::internal_factory(ret); \
+ } \
+ } \
+ RLBOX_REQUIRE_SEMI_COLON
+
+ BinaryOpValAndPtr(+);
+ BinaryOpValAndPtr(-);
+
+#undef BinaryOpValAndPtr
+
+#define BinaryOp(opSymbol) \
+ template<typename T_Rhs> \
+ inline constexpr auto operator opSymbol(const T_Rhs& rhs) \
+ const->tainted<decltype(std::declval<T>() opSymbol std::declval< \
+ detail::rlbox_remove_wrapper_t<T_Rhs>>()), \
+ T_Sbx> \
+ { \
+ static_assert(detail::is_fundamental_or_enum_v<T>, \
+ "Operator " #opSymbol \
+ " only supported for primitive types"); \
+ \
+ auto raw = impl().get_raw_value(); \
+ auto raw_rhs = detail::unwrap_value(rhs); \
+ static_assert(std::is_integral_v<decltype(raw_rhs)> \
+ || std::is_floating_point_v<decltype(raw_rhs)>, \
+ "Can only operate on numeric types"); \
+ \
+ auto ret = raw opSymbol raw_rhs; \
+ using T_Ret = decltype(ret); \
+ return tainted<T_Ret, T_Sbx>::internal_factory(ret); \
+ } \
+ RLBOX_REQUIRE_SEMI_COLON
+
+ BinaryOp(*);
+ BinaryOp(/);
+ BinaryOp(%);
+ BinaryOp(^);
+ BinaryOp(&);
+ BinaryOp(|);
+ BinaryOp(<<);
+ BinaryOp(>>);
+
+#undef BinaryOp
+
+#define CompoundAssignmentOp(opSymbol) \
+ template<typename T_Rhs> \
+ inline constexpr T_Wrap<T, T_Sbx>& operator opSymbol##=(const T_Rhs& rhs) \
+ { \
+ auto& this_ref = impl(); \
+ this_ref = this_ref opSymbol rhs; \
+ return this_ref; \
+ } \
+ RLBOX_REQUIRE_SEMI_COLON
+
+ CompoundAssignmentOp(+);
+ CompoundAssignmentOp(-);
+ CompoundAssignmentOp(*);
+ CompoundAssignmentOp(/);
+ CompoundAssignmentOp(%);
+ CompoundAssignmentOp(^);
+ CompoundAssignmentOp(&);
+ CompoundAssignmentOp(|);
+ CompoundAssignmentOp(<<);
+ CompoundAssignmentOp(>>);
+
+#undef CompoundAssignmentOp
+
+#define PreIncDecOps(opSymbol) \
+ inline constexpr T_Wrap<T, T_Sbx>& operator opSymbol##opSymbol() \
+ { \
+ auto& this_ref = impl(); \
+ this_ref = this_ref opSymbol 1; \
+ return this_ref; \
+ } \
+ RLBOX_REQUIRE_SEMI_COLON
+
+ PreIncDecOps(+);
+ PreIncDecOps(-);
+
+#undef PreIncDecOps
+
+#define PostIncDecOps(opSymbol) \
+ inline constexpr T_Wrap<T, T_Sbx> operator opSymbol##opSymbol(int) \
+ { \
+ tainted<T, T_Sbx> ret = impl(); \
+ operator++(); \
+ return ret; \
+ } \
+ RLBOX_REQUIRE_SEMI_COLON
+
+ PostIncDecOps(+);
+ PostIncDecOps(-);
+
+#undef PostIncDecOps
+
+#define BooleanBinaryOp(opSymbol) \
+ template<typename T_Rhs> \
+ inline constexpr auto operator opSymbol(const T_Rhs& rhs) \
+ const->tainted<decltype(std::declval<T>() opSymbol std::declval< \
+ detail::rlbox_remove_wrapper_t<T_Rhs>>()), \
+ T_Sbx> \
+ { \
+ static_assert(detail::is_fundamental_or_enum_v<T>, \
+ "Operator " #opSymbol \
+ " only supported for primitive types"); \
+ \
+ auto raw = impl().get_raw_value(); \
+ auto raw_rhs = detail::unwrap_value(rhs); \
+ static_assert(std::is_integral_v<decltype(raw_rhs)>, \
+ "Can only operate on numeric types"); \
+ \
+ auto ret = raw opSymbol raw_rhs; \
+ using T_Ret = decltype(ret); \
+ return tainted<T_Ret, T_Sbx>::internal_factory(ret); \
+ } \
+ \
+ template<typename T_Rhs> \
+ inline constexpr auto operator opSymbol(const T_Rhs&&) \
+ const->tainted<decltype(std::declval<T>() opSymbol std::declval< \
+ detail::rlbox_remove_wrapper_t<T_Rhs>>()), \
+ T_Sbx> \
+ { \
+ rlbox_detail_static_fail_because( \
+ detail::true_v<T_Rhs>, \
+ "C++ does not permit safe overloading of && and || operations as this " \
+ "affects the short circuiting behaviour of these operations. RLBox " \
+ "does let you use && and || with tainted in limited situations - when " \
+ "all arguments starting from the second are local variables. It does " \
+ "not allow it if arguments starting from the second are expressions.\n" \
+ "For example the following is not allowed\n" \
+ "\n" \
+ "tainted<bool, T_Sbx> a = true;\n" \
+ "auto r = a && true && sandbox.invoke_sandbox_function(getBool);\n" \
+ "\n" \
+ "However the following would be allowed\n" \
+ "tainted<bool, T_Sbx> a = true;\n" \
+ "auto b = true\n" \
+ "auto c = sandbox.invoke_sandbox_function(getBool);\n" \
+ "auto r = a && b && c;\n" \
+ "\n" \
+ "Note that these 2 programs are not identical. The first program may " \
+ "or may not call getBool, while second program always calls getBool"); \
+ return tainted<bool, T_Sbx>(false); \
+ } \
+ RLBOX_REQUIRE_SEMI_COLON
+
+ BooleanBinaryOp(&&);
+ BooleanBinaryOp(||);
+
+#undef BooleanBinaryOp
+
+#define UnaryOp(opSymbol) \
+ inline auto operator opSymbol() \
+ { \
+ static_assert(detail::is_fundamental_or_enum_v<T>, \
+ "Operator " #opSymbol " only supported for primitive"); \
+ \
+ auto raw = impl().get_raw_value(); \
+ auto ret = opSymbol raw; \
+ using T_Ret = decltype(ret); \
+ return tainted<T_Ret, T_Sbx>::internal_factory(ret); \
+ } \
+ RLBOX_REQUIRE_SEMI_COLON
+
+ UnaryOp(-);
+ UnaryOp(~);
+
+#undef UnaryOp
+
+/**
+ * @brief Comparison operators. Comparisons to values in sandbox memory can
+ * only return a "tainted_boolean_hint" as the values in memory can be
+ * incorrect or maliciously change in the future.
+ *
+ * @tparam T_Rhs
+ * @param rhs
+ * @return One of either a bool, tainted<bool>, or a tainted_boolean_hint
+ * depending on the arguments to the binary expression.
+ */
+#define CompareOp(opSymbol, permit_pointers) \
+ template<typename T_Rhs> \
+ inline constexpr auto operator opSymbol(const T_Rhs& rhs) const \
+ { \
+ using T_RhsNoQ = detail::remove_cv_ref_t<T_Rhs>; \
+ constexpr bool check_rhs_hint = \
+ detail::rlbox_is_tainted_volatile_v<T_RhsNoQ> || \
+ detail::rlbox_is_tainted_boolean_hint_v<T_RhsNoQ>; \
+ constexpr bool check_lhs_hint = \
+ detail::rlbox_is_tainted_volatile_v<T_Wrap<T, T_Sbx>>; \
+ constexpr bool is_hint = check_lhs_hint || check_rhs_hint; \
+ \
+ constexpr bool is_unwrapped = \
+ detail::rlbox_is_tainted_v<T_Wrap<T, T_Sbx>> && \
+ std::is_null_pointer_v<T_RhsNoQ>; \
+ \
+ /* Sanity check - can't be a hint and unwrapped */ \
+ static_assert(is_hint ? !is_unwrapped : true, \
+ "Internal error: Could not deduce type for comparison. " \
+ "Please file a bug."); \
+ \
+ if constexpr (!permit_pointers && std::is_pointer_v<T>) { \
+ rlbox_detail_static_fail_because( \
+ std::is_pointer_v<T>, \
+ "Only == and != comparisons are allowed for pointers"); \
+ } \
+ \
+ bool ret = (impl().get_raw_value() opSymbol detail::unwrap_value(rhs)); \
+ \
+ if constexpr (is_hint) { \
+ return tainted_boolean_hint(ret); \
+ } else if constexpr (is_unwrapped) { \
+ return ret; \
+ } else { \
+ return tainted<bool, T_Sbx>(ret); \
+ } \
+ } \
+ RLBOX_REQUIRE_SEMI_COLON
+
+ CompareOp(==, true /* permit_pointers */);
+ CompareOp(!=, true /* permit_pointers */);
+ CompareOp(<, false /* permit_pointers */);
+ CompareOp(<=, false /* permit_pointers */);
+ CompareOp(>, false /* permit_pointers */);
+ CompareOp(>=, false /* permit_pointers */);
+
+#undef CompareOp
+
+private:
+ using T_OpSubscriptArrRet = std::conditional_t<
+ std::is_pointer_v<T>,
+ tainted_volatile<detail::dereference_result_t<T>, T_Sbx>, // is_pointer
+ T_Wrap<detail::dereference_result_t<T>, T_Sbx> // is_array
+ >;
+
+public:
+ template<typename T_Rhs>
+ inline const T_OpSubscriptArrRet& operator[](T_Rhs&& rhs) const
+ {
+ static_assert(std::is_pointer_v<T> || detail::is_c_or_std_array_v<T>,
+ "Operator [] supports pointers and arrays only");
+
+ auto raw_rhs = detail::unwrap_value(rhs);
+ static_assert(std::is_integral_v<decltype(raw_rhs)>,
+ "Can only index with numeric types");
+
+ if constexpr (std::is_pointer_v<T>) {
+ auto ptr = this->impl().get_raw_value();
+
+ // increment the target by size of the data structure
+ auto target =
+ reinterpret_cast<uintptr_t>(ptr) + raw_rhs * sizeof(*this->impl());
+ auto no_overflow = rlbox_sandbox<T_Sbx>::is_in_same_sandbox(
+ ptr, reinterpret_cast<const void*>(target));
+ detail::dynamic_check(
+ no_overflow,
+ "Pointer arithmetic overflowed a pointer beyond sandbox memory");
+
+ auto target_wrap = tainted<const T, T_Sbx>::internal_factory(
+ reinterpret_cast<const T>(target));
+ return *target_wrap;
+ } else {
+ using T_Rhs_Unsigned = std::make_unsigned_t<decltype(raw_rhs)>;
+ detail::dynamic_check(
+ raw_rhs >= 0 && static_cast<T_Rhs_Unsigned>(raw_rhs) <
+ std::extent_v<detail::std_array_to_c_arr_t<T>, 0>,
+ "Static array indexing overflow");
+
+ const void* target_ptr;
+ if constexpr (detail::rlbox_is_tainted_v<T_Wrap<T, T_Sbx>>) {
+ auto& data_ref = impl().get_raw_value_ref();
+ target_ptr = &(data_ref[raw_rhs]);
+ } else {
+ auto& data_ref = impl().get_sandbox_value_ref();
+ auto target_ptr_vol = &(data_ref[raw_rhs]);
+        // target_ptr_vol is a pointer-to-volatile; strip the volatile qualifier.
+ // Safe as we will return a tainted_volatile if this is the case
+ target_ptr = detail::remove_volatile_from_ptr_cast(target_ptr_vol);
+ }
+
+ using T_Target = const T_Wrap<detail::dereference_result_t<T>, T_Sbx>;
+ auto wrapped_target_ptr = reinterpret_cast<T_Target*>(target_ptr);
+ return *wrapped_target_ptr;
+ }
+ }
+
+ template<typename T_Rhs>
+ inline T_OpSubscriptArrRet& operator[](T_Rhs&& rhs)
+ {
+ return const_cast<T_OpSubscriptArrRet&>(std::as_const(*this)[rhs]);
+ }
+
+private:
+ using T_OpDerefRet = tainted_volatile<std::remove_pointer_t<T>, T_Sbx>;
+
+public:
+ inline T_OpDerefRet& operator*() const
+ {
+ static_assert(std::is_pointer_v<T>, "Operator * only allowed on pointers");
+ auto ret_ptr_const =
+ reinterpret_cast<const T_OpDerefRet*>(impl().get_raw_value());
+ // Safe - If T_OpDerefRet is not a const ptr, this is trivially safe
+ // If T_OpDerefRet is a const ptr, then the const is captured
+ // inside the wrapper
+ auto ret_ptr = const_cast<T_OpDerefRet*>(ret_ptr_const);
+ return *ret_ptr;
+ }
+
+ // We need to implement the -> operator even if T is not a struct
+ // So that we can support code patterns such as the below
+ // tainted<T*> a;
+ // a->UNSAFE_unverified();
+ inline const T_OpDerefRet* operator->() const
+ {
+ static_assert(std::is_pointer_v<T>,
+ "Operator -> only supported for pointer types");
+ return reinterpret_cast<const T_OpDerefRet*>(impl().get_raw_value());
+ }
+
+ inline T_OpDerefRet* operator->()
+ {
+ return const_cast<T_OpDerefRet*>(std::as_const(*this).operator->());
+ }
+
+ inline auto operator!()
+ {
+ if_constexpr_named(cond1, std::is_pointer_v<T>)
+ {
+ return impl() == nullptr;
+ }
+ else if_constexpr_named(cond2, std::is_same_v<std::remove_cv_t<T>, bool>)
+ {
+ return impl() == false;
+ }
+ else
+ {
+ auto unknownCase = !(cond1 || cond2);
+ rlbox_detail_static_fail_because(
+ unknownCase,
+ "Operator ! only permitted for pointer or boolean types. For other"
+ "types, unwrap the tainted value with the copy_and_verify API and then"
+ "use operator !");
+ }
+ }
+
+ /**
+ * @brief Copy tainted value from sandbox and verify it.
+ *
+ * @param verifier Function used to verify the copied value.
+ * @tparam T_Func the type of the verifier.
+ * @return Whatever the verifier function returns.
+ */
+ template<typename T_Func>
+ inline auto copy_and_verify(T_Func verifier) const
+ {
+ using T_Deref = std::remove_cv_t<std::remove_pointer_t<T>>;
+
+ if_constexpr_named(cond1, detail::is_fundamental_or_enum_v<T>)
+ {
+ auto val = impl().get_raw_value();
+ return verifier(val);
+ }
+ else if_constexpr_named(
+ cond2, detail::is_one_level_ptr_v<T> && !std::is_class_v<T_Deref>)
+ {
+ // Some paths don't use the verifier
+ RLBOX_UNUSED(verifier);
+
+ if_constexpr_named(subcond1, std::is_void_v<T_Deref>)
+ {
+ rlbox_detail_static_fail_because(
+ subcond1,
+ "copy_and_verify not recommended for void* as it could lead to some "
+ "anti-patterns in verifiers. Cast it to a different tainted pointer "
+ "with sandbox_reinterpret_cast and then call copy_and_verify. "
+ "Alternately, you can use the UNSAFE_unverified API to do this "
+ "without casting.");
+ return nullptr;
+ }
+ // Test with detail::is_func_ptr_v to check for member funcs also
+ else if_constexpr_named(subcond2, detail::is_func_ptr_v<T>)
+ {
+ rlbox_detail_static_fail_because(
+ subcond2,
+ "copy_and_verify cannot be applied to function pointers as this "
+ "makes a deep copy. This is not possible for function pointers. "
+ "Consider copy_and_verify_address instead.");
+ return nullptr;
+ }
+ else
+ {
+ auto val = impl().get_raw_value();
+ if (val == nullptr) {
+ return verifier(nullptr);
+ } else {
+ // Important to assign to a local variable (i.e. make a copy)
+ // Else, for tainted_volatile, this will allow a
+ // time-of-check-time-of-use attack
+ auto val_copy = std::make_unique<T_Deref>();
+ *val_copy = *val;
+ return verifier(std::move(val_copy));
+ }
+ }
+ }
+ else if_constexpr_named(
+ cond3, detail::is_one_level_ptr_v<T> && std::is_class_v<T_Deref>)
+ {
+ auto val_copy = std::make_unique<tainted<T_Deref, T_Sbx>>(*impl());
+ return verifier(std::move(val_copy));
+ }
+ else if_constexpr_named(cond4, std::is_array_v<T>)
+ {
+ static_assert(
+ detail::is_fundamental_or_enum_v<std::remove_all_extents_t<T>>,
+ "copy_and_verify on arrays is only safe for fundamental or enum types. "
+ "For arrays of other types, apply copy_and_verify on each element "
+ "individually --- a[i].copy_and_verify(...)");
+
+ auto copy = impl().get_raw_value();
+ return verifier(copy);
+ }
+ else
+ {
+ auto unknownCase = !(cond1 || cond2 || cond3 || cond4);
+ rlbox_detail_static_fail_because(
+ unknownCase,
+ "copy_and_verify not supported for this type as it may be unsafe");
+ }
+ }
+
+private:
+ using T_CopyAndVerifyRangeEl =
+ detail::valid_array_el_t<std::remove_cv_t<std::remove_pointer_t<T>>>;
+
+ // Template needed to ensure that function isn't instantiated for unsupported
+ // types like function pointers which causes compile errors...
+ template<typename T2 = T>
+ inline const void* verify_range_helper(std::size_t count) const
+ {
+ static_assert(std::is_pointer_v<T>);
+ static_assert(detail::is_fundamental_or_enum_v<T_CopyAndVerifyRangeEl>);
+
+ detail::dynamic_check(
+ count != 0,
+ "Called copy_and_verify_range/copy_and_verify_string with count 0");
+
+ auto start = reinterpret_cast<const void*>(impl().get_raw_value());
+ if (start == nullptr) {
+ return nullptr;
+ }
+
+ detail::check_range_doesnt_cross_app_sbx_boundary<T_Sbx>(
+ start, count * sizeof(T_CopyAndVerifyRangeEl));
+
+ return start;
+ }
+
+ template<typename T2 = T>
+ inline std::unique_ptr<T_CopyAndVerifyRangeEl[]> copy_and_verify_range_helper(
+ std::size_t count) const
+ {
+ const void* start = verify_range_helper(count);
+ if (start == nullptr) {
+ return nullptr;
+ }
+
+ auto target = std::make_unique<T_CopyAndVerifyRangeEl[]>(count);
+
+ for (size_t i = 0; i < count; i++) {
+ auto p_src_i_tainted = &(impl()[i]);
+ auto p_src_i = p_src_i_tainted.get_raw_value();
+ detail::convert_type_fundamental_or_array(target[i], *p_src_i);
+ }
+
+ return target;
+ }
+
+public:
+ /**
+ * @brief Copy a range of tainted values from sandbox and verify them.
+ *
+ * @param verifier Function used to verify the copied value.
+ * @param count Number of elements to copy.
+ * @tparam T_Func the type of the verifier. If the tainted type is ``int*``
+ * then ``T_Func = T_Ret(*)(unique_ptr<int[]>)``.
+ * @return Whatever the verifier function returns.
+ */
+ template<typename T_Func>
+ inline auto copy_and_verify_range(T_Func verifier, std::size_t count) const
+ {
+ static_assert(std::is_pointer_v<T>,
+ "Can only call copy_and_verify_range on pointers");
+
+ static_assert(
+ detail::is_fundamental_or_enum_v<T_CopyAndVerifyRangeEl>,
+ "copy_and_verify_range is only safe for ranges of "
+ "fundamental or enum types. For other types, call "
+ "copy_and_verify on each element --- a[i].copy_and_verify(...)");
+
+ std::unique_ptr<T_CopyAndVerifyRangeEl[]> target =
+ copy_and_verify_range_helper(count);
+ return verifier(std::move(target));
+ }
+
+ /**
+ * @brief Copy a tainted string from sandbox and verify it.
+ *
+ * @param verifier Function used to verify the copied value.
+ * @tparam T_Func the type of the verifier either
+ * ``T_Ret(*)(unique_ptr<char[]>)`` or ``T_Ret(*)(std::string)``
+ * @return Whatever the verifier function returns.
+ */
+ template<typename T_Func>
+ inline auto copy_and_verify_string(T_Func verifier) const
+ {
+ static_assert(std::is_pointer_v<T>,
+ "Can only call copy_and_verify_string on pointers");
+
+ static_assert(std::is_same_v<char, T_CopyAndVerifyRangeEl>,
+ "copy_and_verify_string only allows char*");
+
+ using T_VerifParam = detail::func_first_arg_t<T_Func>;
+
+ auto start = impl().get_raw_value();
+ if_constexpr_named(
+ cond1,
+ std::is_same_v<T_VerifParam, std::unique_ptr<char[]>> ||
+ std::is_same_v<T_VerifParam, std::unique_ptr<const char[]>>)
+ {
+ if (start == nullptr) {
+ return verifier(nullptr);
+ }
+
+      // it is safe to run strlen on a tainted<string> as worst case, the string
+      // does not have a null and we try to copy all the memory out of the
+      // sandbox. However, copy_and_verify_range ensures that we never copy
+      // memory outside the range
+ auto str_len = std::strlen(start) + 1;
+ std::unique_ptr<T_CopyAndVerifyRangeEl[]> target =
+ copy_and_verify_range_helper(str_len);
+
+ // ensure the string has a trailing null
+ target[str_len - 1] = '\0';
+
+ return verifier(std::move(target));
+ }
+ else if_constexpr_named(cond2, std::is_same_v<T_VerifParam, std::string>)
+ {
+ if (start == nullptr) {
+ std::string param = "";
+ return verifier(param);
+ }
+
+      // it is safe to run strlen on a tainted<string> as worst case, the string
+      // does not have a null and we try to copy all the memory out of the
+      // sandbox. However, copy_and_verify_range ensures that we never copy
+      // memory outside the range
+ auto str_len = std::strlen(start) + 1;
+
+ const char* checked_start = (const char*)verify_range_helper(str_len);
+ if (checked_start == nullptr) {
+ std::string param = "";
+ return verifier(param);
+ }
+
+ std::string copy(checked_start, str_len - 1);
+ return verifier(std::move(copy));
+ }
+ else
+ {
+ constexpr bool unknownCase = !(cond1 || cond2);
+ rlbox_detail_static_fail_because(
+ unknownCase,
+ "copy_and_verify_string verifier parameter should either be "
+ "unique_ptr<char[]>, unique_ptr<const char[]> or std::string");
+ }
+ }
+
+ /**
+ * @brief Copy a tainted pointer from sandbox and verify the address.
+ *
+ * This function is useful if you need to verify physical bits representing
+ * the address of a pointer. Other APIs such as copy_and_verify performs a
+ * deep copy and changes the address bits.
+ *
+ * @param verifier Function used to verify the copied value.
+ * @tparam T_Func the type of the verifier ``T_Ret(*)(uintptr_t)``
+ * @return Whatever the verifier function returns.
+ */
+ template<typename T_Func>
+ inline auto copy_and_verify_address(T_Func verifier) const
+ {
+ static_assert(std::is_pointer_v<T>,
+ "copy_and_verify_address must be used on pointers");
+ auto val = reinterpret_cast<uintptr_t>(impl().get_raw_value());
+ return verifier(val);
+ }
+
+ /**
+ * @brief Copy a tainted pointer to a buffer from sandbox and verify the
+ * address.
+ *
+ * This function is useful if you need to verify physical bits representing
+ * the address of a buffer. Other APIs such as copy_and_verify performs a
+ * deep copy and changes the address bits.
+ *
+ * @param verifier Function used to verify the copied value.
+ * @param size Size of the buffer. Buffer with length size is expected to fit
+ * inside sandbox memory.
+ * @tparam T_Func the type of the verifier ``T_Ret(*)(uintptr_t)``
+ * @return Whatever the verifier function returns.
+ */
+ template<typename T_Func>
+ inline auto copy_and_verify_buffer_address(T_Func verifier,
+ std::size_t size) const
+ {
+ static_assert(std::is_pointer_v<T>,
+ "copy_and_verify_address must be used on pointers");
+ auto val = reinterpret_cast<uintptr_t>(verify_range_helper(size));
+ return verifier(val);
+ }
+};
+
+#define BinaryOpWrappedRhs(opSymbol) \
+ template<template<typename, typename> typename T_Wrap, \
+ typename T, \
+ typename T_Sbx, \
+ typename T_Lhs, \
+ RLBOX_ENABLE_IF(!detail::rlbox_is_wrapper_v<T_Lhs> && \
+ !detail::rlbox_is_tainted_boolean_hint_v<T_Lhs>)> \
+ inline constexpr auto operator opSymbol( \
+ const T_Lhs& lhs, const tainted_base_impl<T_Wrap, T, T_Sbx>& rhs) \
+ { \
+ /* Handles the case for "3 + tainted", where + is a binary op */ \
+ /* Technically pointer arithmetic can be performed as 3 + tainted_ptr */ \
+ /* as well. However, this is unusual and to keep the code simple we do */ \
+ /* not support this. */ \
+ static_assert( \
+ std::is_arithmetic_v<T_Lhs>, \
+ "Binary expressions between an non tainted type and tainted" \
+ "type is only permitted if the first value is the tainted type. Try " \
+ "changing the order of the binary expression accordingly"); \
+ auto ret = tainted<T_Lhs, T_Sbx>(lhs) opSymbol rhs.impl(); \
+ return ret; \
+ } \
+ RLBOX_REQUIRE_SEMI_COLON
+
+BinaryOpWrappedRhs(+);
+BinaryOpWrappedRhs(-);
+BinaryOpWrappedRhs(*);
+BinaryOpWrappedRhs(/);
+BinaryOpWrappedRhs(%);
+BinaryOpWrappedRhs(^);
+BinaryOpWrappedRhs(&);
+BinaryOpWrappedRhs(|);
+BinaryOpWrappedRhs(<<);
+BinaryOpWrappedRhs(>>);
+BinaryOpWrappedRhs(==);
+BinaryOpWrappedRhs(!=);
+BinaryOpWrappedRhs(<);
+BinaryOpWrappedRhs(<=);
+BinaryOpWrappedRhs(>);
+BinaryOpWrappedRhs(>=);
+#undef BinaryOpWrappedRhs
+
+#define BooleanBinaryOpWrappedRhs(opSymbol) \
+ template<template<typename, typename> typename T_Wrap, \
+ typename T, \
+ typename T_Sbx, \
+ typename T_Lhs, \
+ RLBOX_ENABLE_IF(!detail::rlbox_is_wrapper_v<T_Lhs> && \
+ !detail::rlbox_is_tainted_boolean_hint_v<T_Lhs>)> \
+ inline constexpr auto operator opSymbol( \
+ const T_Lhs& lhs, const tainted_base_impl<T_Wrap, T, T_Sbx>& rhs) \
+ { \
+ static_assert( \
+ std::is_arithmetic_v<T_Lhs>, \
+ "Binary expressions between an non tainted type and tainted" \
+ "type is only permitted if the first value is the tainted type. Try " \
+ "changing the order of the binary expression accordingly"); \
+ auto ret = tainted<T_Lhs, T_Sbx>(lhs) opSymbol rhs.impl(); \
+ return ret; \
+ } \
+ \
+ template<template<typename, typename> typename T_Wrap, \
+ typename T, \
+ typename T_Sbx, \
+ typename T_Lhs, \
+ RLBOX_ENABLE_IF(!detail::rlbox_is_wrapper_v<T_Lhs> && \
+ !detail::rlbox_is_tainted_boolean_hint_v<T_Lhs>)> \
+ inline constexpr auto operator opSymbol( \
+ const T_Lhs&, const tainted_base_impl<T_Wrap, T, T_Sbx>&&) \
+ { \
+ rlbox_detail_static_fail_because( \
+ detail::true_v<T_Lhs>, \
+ "C++ does not permit safe overloading of && and || operations as this " \
+ "affects the short circuiting behaviour of these operations. RLBox " \
+ "does let you use && and || with tainted in limited situations - when " \
+ "all arguments starting from the second are local variables. It does " \
+ "not allow it if arguments starting from the second are expressions.\n" \
+ "For example the following is not allowed\n" \
+ "\n" \
+ "tainted<bool, T_Sbx> a = true;\n" \
+ "auto r = a && true && sandbox.invoke_sandbox_function(getBool);\n" \
+ "\n" \
+ "However the following would be allowed\n" \
+ "tainted<bool, T_Sbx> a = true;\n" \
+ "auto b = true\n" \
+ "auto c = sandbox.invoke_sandbox_function(getBool);\n" \
+ "auto r = a && b && c;\n" \
+ "\n" \
+ "Note that these 2 programs are not identical. The first program may " \
+ "or may not call getBool, while second program always calls getBool"); \
+ return tainted<bool, T_Sbx>(false); \
+ } \
+ RLBOX_REQUIRE_SEMI_COLON
+
+BooleanBinaryOpWrappedRhs(&&);
+BooleanBinaryOpWrappedRhs(||);
+#undef BooleanBinaryOpWrappedRhs
+
+namespace tainted_detail {
+ template<typename T, typename T_Sbx>
+ using tainted_repr_t = detail::c_to_std_array_t<T>;
+
+ template<typename T, typename T_Sbx>
+ using tainted_vol_repr_t =
+ detail::c_to_std_array_t<std::add_volatile_t<typename rlbox_sandbox<
+ T_Sbx>::template convert_to_sandbox_equivalent_nonclass_t<T>>>;
+}
+
+/**
+ * @brief Tainted values represent untrusted values that originate from the
+ * sandbox.
+ */
+template<typename T, typename T_Sbx>
+class tainted : public tainted_base_impl<tainted, T, T_Sbx>
+{
+ KEEP_CLASSES_FRIENDLY
+ KEEP_CAST_FRIENDLY
+
+  // Classes receive their own specialization
+ static_assert(
+ !std::is_class_v<T>,
+ "Missing definition for class T. This error occurs for one "
+ "of 2 reasons.\n"
+ " 1) Make sure you have include a call rlbox_load_structs_from_library "
+ "for this library with this class included.\n"
+ " 2) Make sure you run (re-run) the struct-dump tool to list "
+ "all structs in use by your program.\n");
+
+ static_assert(
+ detail::is_basic_type_v<T> || std::is_array_v<T>,
+ "Tainted types only support fundamental, enum, pointer, array and struct "
+ "types. Please file a bug if more support is needed.");
+
+private:
+ using T_ClassBase = tainted_base_impl<tainted, T, T_Sbx>;
+ using T_AppType = tainted_detail::tainted_repr_t<T, T_Sbx>;
+ using T_SandboxedType = tainted_detail::tainted_vol_repr_t<T, T_Sbx>;
+ T_AppType data;
+
+ inline auto& get_raw_value_ref() noexcept { return data; }
+ inline auto& get_raw_value_ref() const noexcept { return data; }
+
+ inline std::remove_cv_t<T_AppType> get_raw_value() const noexcept
+ {
+ return data;
+ }
+
+ inline std::remove_cv_t<T_SandboxedType> get_raw_sandbox_value(
+ rlbox_sandbox<T_Sbx>& sandbox) const
+ {
+ std::remove_cv_t<T_SandboxedType> ret;
+
+ using namespace detail;
+ convert_type_non_class<T_Sbx,
+ adjust_type_direction::TO_SANDBOX,
+ adjust_type_context::SANDBOX>(
+ ret, data, nullptr /* example_unsandboxed_ptr */, &sandbox);
+ return ret;
+ };
+
+ inline const void* find_example_pointer_or_null() const noexcept
+ {
+ if constexpr (std::is_array_v<T>) {
+ auto& data_ref = get_raw_value_ref();
+
+ for (size_t i = 0; i < std::extent_v<T>; i++) {
+ const void* ret = data[i].find_example_pointer_or_null();
+ if (ret != nullptr) {
+ return ret;
+ }
+ }
+ } else if constexpr (std::is_pointer_v<T> && !detail::is_func_ptr_v<T>) {
+ auto data = get_raw_value();
+ return data;
+ }
+ return nullptr;
+ }
+
+ // Initializing with a pointer is dangerous and permitted only internally
+ template<typename T2 = T, RLBOX_ENABLE_IF(std::is_pointer_v<T2>)>
+ tainted(T2 val, const void* /* internal_tag */)
+ : data(val)
+ {
+ // Sanity check
+ static_assert(std::is_pointer_v<T>);
+ }
+
+ template<typename T_Rhs>
+ static inline tainted<T, T_Sbx> internal_factory(T_Rhs&& rhs)
+ {
+ if constexpr (std::is_pointer_v<std::remove_reference_t<T_Rhs>>) {
+ const void* internal_tag = nullptr;
+ return tainted(std::forward<T_Rhs>(rhs), internal_tag);
+ } else {
+ return tainted(std::forward<T_Rhs>(rhs));
+ }
+ }
+
+public:
+ tainted() = default;
+ tainted(const tainted<T, T_Sbx>& p) = default;
+
+ tainted(const tainted_volatile<T, T_Sbx>& p)
+ {
+ // Need to construct an example_unsandboxed_ptr for pointers or arrays of
+ // pointers. Since tainted_volatile is the type of data in sandbox memory,
+ // the address of data (&data) refers to a location in sandbox memory and
+ // can thus be the example_unsandboxed_ptr
+ const volatile void* p_data_ref = &p.get_sandbox_value_ref();
+ const void* example_unsandboxed_ptr = const_cast<const void*>(p_data_ref);
+ using namespace detail;
+ convert_type_non_class<T_Sbx,
+ adjust_type_direction::TO_APPLICATION,
+ adjust_type_context::EXAMPLE>(
+ get_raw_value_ref(),
+ p.get_sandbox_value_ref(),
+ example_unsandboxed_ptr,
+ nullptr /* sandbox_ptr */);
+ }
+
+ // Initializing with a pointer is dangerous and permitted only internally
+ template<typename T2 = T, RLBOX_ENABLE_IF(std::is_pointer_v<T2>)>
+ tainted(T2 val)
+ : data(val)
+ {
+ rlbox_detail_static_fail_because(
+ std::is_pointer_v<T2>,
+ "Assignment of pointers is not safe as it could\n "
+ "1) Leak pointers from the appliction to the sandbox which may break "
+ "ASLR\n "
+ "2) Pass inaccessible pointers to the sandbox leading to crash\n "
+ "3) Break sandboxes that require pointers to be swizzled first\n "
+ "\n "
+ "Instead, if you want to pass in a pointer, do one of the following\n "
+ "1) Allocate with malloc_in_sandbox, and pass in a tainted pointer\n "
+ "2) For pointers that point to functions in the application, register "
+ "with sandbox.register_callback(\"foo\"), and pass in the registered "
+ "value\n "
+ "3) For pointers that point to functions in the sandbox, get the "
+ "address with get_sandbox_function_address(sandbox, foo), and pass in "
+ "the "
+ "address\n "
+ "4) For raw pointers, use assign_raw_pointer which performs required "
+ "safety checks\n ");
+ }
+
+ tainted(
+ const sandbox_callback<
+ detail::function_ptr_t<T> // Need to ensure we never generate code that
+ // creates a sandbox_callback of a non function
+ ,
+ T_Sbx>&)
+ {
+ rlbox_detail_static_fail_because(
+ detail::true_v<T>,
+ "RLBox does not support assigning sandbox_callback values to tainted "
+ "types (i.e. types that live in application memory).\n"
+ "If you still want to do this, consider changing your code to store the "
+ "value in sandbox memory as follows. Convert\n\n"
+ "sandbox_callback<T_Func, Sbx> cb = ...;\n"
+ "tainted<T_Func, Sbx> foo = cb;\n\n"
+ "to\n\n"
+ "tainted<T_Func*, Sbx> foo_ptr = sandbox.malloc_in_sandbox<T_Func*>();\n"
+ "*foo_ptr = cb;\n\n"
+ "This would keep the assignment in sandbox memory");
+ }
+
+ tainted(const std::nullptr_t& arg)
+ : data(arg)
+ {
+ static_assert(std::is_pointer_v<T>);
+ }
+
+ // We explicitly disable this constructor if it has one of the signatures
+ // above, so that we give the above constructors a higher priority. We only
+ // allow this for fundamental types as this is potentially unsafe for pointers
+ // and structs
+ template<typename T_Arg,
+ RLBOX_ENABLE_IF(
+ !detail::rlbox_is_wrapper_v<std::remove_reference_t<T_Arg>> &&
+ detail::is_fundamental_or_enum_v<T> &&
+ detail::is_fundamental_or_enum_v<std::remove_reference_t<T_Arg>>)>
+ tainted(T_Arg&& arg)
+ : data(std::forward<T_Arg>(arg))
+ {}
+
+ template<typename T_Rhs>
+ void assign_raw_pointer(rlbox_sandbox<T_Sbx>& sandbox, T_Rhs val)
+ {
+ static_assert(std::is_pointer_v<T_Rhs>, "Must be a pointer");
+ static_assert(std::is_assignable_v<T&, T_Rhs>,
+ "Should assign pointers of compatible types.");
+ // Maybe a function pointer, so we need to cast
+ const void* cast_val = reinterpret_cast<const void*>(val);
+ bool safe = sandbox.is_pointer_in_sandbox_memory(cast_val);
+ detail::dynamic_check(
+ safe,
+ "Tried to assign a pointer that is not in the sandbox.\n "
+ "This is not safe as it could\n "
+ "1) Leak pointers from the appliction to the sandbox which may break "
+ "ASLR\n "
+ "2) Pass inaccessible pointers to the sandbox leading to crash\n "
+ "3) Break sandboxes that require pointers to be swizzled first\n "
+ "\n "
+ "Instead, if you want to pass in a pointer, do one of the following\n "
+ "1) Allocate with malloc_in_sandbox, and pass in a tainted pointer\n "
+ "2) For pointers that point to functions in the application, register "
+ "with sandbox.register_callback(\"foo\"), and pass in the registered "
+ "value\n "
+ "3) For pointers that point to functions in the sandbox, get the "
+ "address with get_sandbox_function_address(sandbox, foo), and pass in "
+ "the "
+ "address\n ");
+ data = val;
+ }
+
+ inline tainted_opaque<T, T_Sbx> to_opaque()
+ {
+ return *reinterpret_cast<tainted_opaque<T, T_Sbx>*>(this);
+ }
+
+ template<typename T_Dummy = void>
+ operator bool() const
+ {
+ if_constexpr_named(cond1, std::is_pointer_v<T>)
+ {
+      // We return this without the tainted wrapper as the checking for null
+      // doesn't really "induce" tainting in the application. If the
+      // application is checking this pointer for null, then it is robust to
+      // this pointer being null or not null.
+ return get_raw_value() != nullptr;
+ }
+ else
+ {
+ auto unknownCase = !(cond1);
+ rlbox_detail_static_fail_because(
+ unknownCase,
+ "Implicit conversion to bool is only permitted for pointer types. For "
+ "other types, unwrap the tainted value with the copy_and_verify API "
+ "and then perform the required checks");
+ }
+ }
+};
+
+template<typename T, typename T_Sbx>
+inline tainted<T, T_Sbx> from_opaque(tainted_opaque<T, T_Sbx> val)
+{
+ return *reinterpret_cast<tainted<T, T_Sbx>*>(&val);
+}
+
+/**
+ * @brief Tainted volatile values are like tainted values but still point to
+ * sandbox memory. Dereferencing a tainted pointer produces a tainted_volatile.
+ */
+template<typename T, typename T_Sbx>
+class tainted_volatile : public tainted_base_impl<tainted_volatile, T, T_Sbx>
+{
+ KEEP_CLASSES_FRIENDLY
+ KEEP_CAST_FRIENDLY
+
+ // Classes receive their own specialization
+ static_assert(
+ !std::is_class_v<T>,
+ "Missing definition for class T. This error occurs for one "
+ "of 2 reasons.\n"
+ " 1) Make sure you have include a call rlbox_load_structs_from_library "
+ "for this library with this class included.\n"
+ " 2) Make sure you run (re-run) the struct-dump tool to list "
+ "all structs in use by your program.\n");
+
+ static_assert(
+ detail::is_basic_type_v<T> || std::is_array_v<T>,
+ "Tainted types only support fundamental, enum, pointer, array and struct "
+ "types. Please file a bug if more support is needed.");
+
+private:
+ using T_ClassBase = tainted_base_impl<tainted_volatile, T, T_Sbx>;
+ using T_AppType = tainted_detail::tainted_repr_t<T, T_Sbx>;
+ using T_SandboxedType = tainted_detail::tainted_vol_repr_t<T, T_Sbx>;
+ T_SandboxedType data;
+
+ inline auto& get_sandbox_value_ref() noexcept { return data; }
+ inline auto& get_sandbox_value_ref() const noexcept { return data; }
+
+ inline std::remove_cv_t<T_AppType> get_raw_value() const
+ {
+ std::remove_cv_t<T_AppType> ret;
+ // Need to construct an example_unsandboxed_ptr for pointers or arrays of
+ // pointers. Since tainted_volatile is the type of data in sandbox memory,
+ // the address of data (&data) refers to a location in sandbox memory and
+ // can thus be the example_unsandboxed_ptr
+ const volatile void* data_ref = &data;
+ const void* example_unsandboxed_ptr = const_cast<const void*>(data_ref);
+ using namespace detail;
+ convert_type_non_class<T_Sbx,
+ adjust_type_direction::TO_APPLICATION,
+ adjust_type_context::EXAMPLE>(
+ ret, data, example_unsandboxed_ptr, nullptr /* sandbox_ptr */);
+ return ret;
+ }
+
+ inline std::remove_cv_t<T_SandboxedType> get_raw_sandbox_value()
+ const noexcept
+ {
+ return data;
+ };
+
+ inline std::remove_cv_t<T_SandboxedType> get_raw_sandbox_value(
+ rlbox_sandbox<T_Sbx>& sandbox) const noexcept
+ {
+ RLBOX_UNUSED(sandbox);
+ return data;
+ };
+
+ tainted_volatile() = default;
+ tainted_volatile(const tainted_volatile<T, T_Sbx>& p) = default;
+
+public:
+ inline tainted<const T*, T_Sbx> operator&() const noexcept
+ {
+ auto ref =
+ detail::remove_volatile_from_ptr_cast(&this->get_sandbox_value_ref());
+ auto ref_cast = reinterpret_cast<const T*>(ref);
+ return tainted<const T*, T_Sbx>::internal_factory(ref_cast);
+ }
+
+ inline tainted<T*, T_Sbx> operator&() noexcept
+ {
+ return sandbox_const_cast<T*>(&std::as_const(*this));
+ }
+
+ // Needed as the definition of unary & above shadows the base's binary &
+ rlbox_detail_forward_binop_to_base(&, T_ClassBase);
+
+ template<typename T_RhsRef>
+ inline tainted_volatile<T, T_Sbx>& operator=(T_RhsRef&& val)
+ {
+ using T_Rhs = std::remove_reference_t<T_RhsRef>;
+ using T_Rhs_El = std::remove_all_extents_t<T_Rhs>;
+
+ // Need to construct an example_unsandboxed_ptr for pointers or arrays of
+ // pointers. Since tainted_volatile is the type of data in sandbox memory,
+ // the address of data (&data) refers to a location in sandbox memory and
+ // can thus be the example_unsandboxed_ptr
+ const volatile void* data_ref = &get_sandbox_value_ref();
+ const void* example_unsandboxed_ptr = const_cast<const void*>(data_ref);
+ // Some branches don't use this
+ RLBOX_UNUSED(example_unsandboxed_ptr);
+
+ if_constexpr_named(
+ cond1, std::is_same_v<std::remove_const_t<T_Rhs>, std::nullptr_t>)
+ {
+ static_assert(std::is_pointer_v<T>,
+ "Null pointer can only be assigned to pointers");
+ // assign using an integer instead of nullptr, as the pointer field may be
+ // represented as integer
+ data = 0;
+ }
+ else if_constexpr_named(cond2, detail::rlbox_is_tainted_v<T_Rhs>)
+ {
+ using namespace detail;
+ convert_type_non_class<T_Sbx,
+ adjust_type_direction::TO_SANDBOX,
+ adjust_type_context::EXAMPLE>(
+ get_sandbox_value_ref(),
+ val.get_raw_value_ref(),
+ example_unsandboxed_ptr,
+ nullptr /* sandbox_ptr */);
+ }
+ else if_constexpr_named(cond3, detail::rlbox_is_tainted_volatile_v<T_Rhs>)
+ {
+ using namespace detail;
+ convert_type_non_class<T_Sbx,
+ adjust_type_direction::NO_CHANGE,
+ adjust_type_context::EXAMPLE>(
+ get_sandbox_value_ref(),
+ val.get_sandbox_value_ref(),
+ example_unsandboxed_ptr,
+ nullptr /* sandbox_ptr */);
+ }
+ else if_constexpr_named(cond4, detail::rlbox_is_sandbox_callback_v<T_Rhs>)
+ {
+ using T_RhsFunc = detail::rlbox_remove_wrapper_t<T_Rhs>;
+
+ // need to perform some typechecking to ensure we are assigning compatible
+ // function pointer types only
+ if_constexpr_named(subcond1, !std::is_assignable_v<T&, T_RhsFunc>)
+ {
+ rlbox_detail_static_fail_because(
+ subcond1,
+ "Trying to assign function pointer to field of incompatible types");
+ }
+ else
+ {
+ // Need to reinterpret_cast as the representation of the signature of a
+ // callback uses the machine model of the sandbox, while the field uses
+ // that of the application. But we have already checked above that this
+ // is safe.
+ auto func = val.get_raw_sandbox_value();
+ using T_Cast = std::remove_volatile_t<T_SandboxedType>;
+ get_sandbox_value_ref() = (T_Cast)func;
+ }
+ }
+ else if_constexpr_named(
+ cond5,
+ detail::is_fundamental_or_enum_v<T> ||
+ (std::is_array_v<T> && !std::is_pointer_v<T_Rhs_El>))
+ {
+ detail::convert_type_fundamental_or_array(get_sandbox_value_ref(), val);
+ }
+ else if_constexpr_named(
+ cond6, std::is_pointer_v<T_Rhs> || std::is_pointer_v<T_Rhs_El>)
+ {
+ rlbox_detail_static_fail_because(
+ cond6,
+ "Assignment of pointers is not safe as it could\n "
+ "1) Leak pointers from the appliction to the sandbox which may break "
+ "ASLR\n "
+ "2) Pass inaccessible pointers to the sandbox leading to crash\n "
+ "3) Break sandboxes that require pointers to be swizzled first\n "
+ "\n "
+ "Instead, if you want to pass in a pointer, do one of the following\n "
+ "1) Allocate with malloc_in_sandbox, and pass in a tainted pointer\n "
+ "2) For pointers that point to functions in the application, register "
+ "with sandbox.register_callback(\"foo\"), and pass in the registered "
+ "value\n "
+ "3) For pointers that point to functions in the sandbox, get the "
+ "address with get_sandbox_function_address(sandbox, foo), and pass in "
+ "the "
+ "address\n "
+ "4) For raw pointers, use assign_raw_pointer which performs required "
+ "safety checks\n ");
+ }
+ else
+ {
+ auto unknownCase =
+ !(cond1 || cond2 || cond3 || cond4 || cond5 /* || cond6 */);
+ rlbox_detail_static_fail_because(
+ unknownCase, "Assignment of the given type of value is not supported");
+ }
+
+ return *this;
+ }
+
+ template<typename T_Rhs>
+ void assign_raw_pointer(rlbox_sandbox<T_Sbx>& sandbox, T_Rhs val)
+ {
+ static_assert(std::is_pointer_v<T_Rhs>, "Must be a pointer");
+ static_assert(std::is_assignable_v<T&, T_Rhs>,
+ "Should assign pointers of compatible types.");
+ // Maybe a function pointer, so we need to cast
+ const void* cast_val = reinterpret_cast<const void*>(val);
+ bool safe = sandbox.is_pointer_in_sandbox_memory(cast_val);
+ detail::dynamic_check(
+ safe,
+ "Tried to assign a pointer that is not in the sandbox.\n "
+ "This is not safe as it could\n "
+ "1) Leak pointers from the appliction to the sandbox which may break "
+ "ASLR\n "
+ "2) Pass inaccessible pointers to the sandbox leading to crash\n "
+ "3) Break sandboxes that require pointers to be swizzled first\n "
+ "\n "
+ "Instead, if you want to pass in a pointer, do one of the following\n "
+ "1) Allocate with malloc_in_sandbox, and pass in a tainted pointer\n "
+ "2) For pointers that point to functions in the application, register "
+ "with sandbox.register_callback(\"foo\"), and pass in the registered "
+ "value\n "
+ "3) For pointers that point to functions in the sandbox, get the "
+ "address with get_sandbox_function_address(sandbox, foo), and pass in "
+ "the "
+ "address\n ");
+ get_sandbox_value_ref() =
+ sandbox.template get_sandboxed_pointer<T_Rhs>(cast_val);
+ }
+
+ template<typename T_Dummy = void>
+ operator bool() const
+ {
+ rlbox_detail_static_fail_because(
+ detail::true_v<T_Dummy>,
+ "Cannot apply implicit conversion to bool on values that are located in "
+ "sandbox memory. This error occurs if you compare a dereferenced value "
+ "such as the code shown below\n\n"
+ "tainted<int**> a = ...;\n"
+ "assert(*a);\n\n"
+ "Instead you can write this code as \n"
+ "tainted<int*> temp = *a;\n"
+ "assert(temp);\n");
+ return false;
+ }
+};
+
+}
diff --git a/third_party/rlbox/include/rlbox_app_pointer.hpp b/third_party/rlbox/include/rlbox_app_pointer.hpp
new file mode 100644
index 0000000000..5af4876867
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_app_pointer.hpp
@@ -0,0 +1,93 @@
+#pragma once
+// IWYU pragma: private, include "rlbox.hpp"
+// IWYU pragma: friend "rlbox_.*\.hpp"
+
+#include <map>
+#ifndef RLBOX_USE_CUSTOM_SHARED_LOCK
+# include <shared_mutex>
+#endif
+#include <type_traits>
+
+#include "rlbox_helpers.hpp"
+#include "rlbox_type_traits.hpp"
+#include "rlbox_types.hpp"
+
+namespace rlbox {
+
+template<typename T_PointerType>
+class app_pointer_map
+{
+
+private:
+ using T_PointerTypeUnsigned = detail::unsigned_int_of_size_t<T_PointerType>;
+
+ std::map<T_PointerTypeUnsigned, void*> pointer_map;
+ T_PointerTypeUnsigned counter = 1;
+#ifndef RLBOX_SINGLE_THREADED_INVOCATIONS
+ RLBOX_SHARED_LOCK(map_mutex);
+#endif
+
+ T_PointerType get_unused_index(T_PointerType max_ptr_val)
+ {
+ const auto max_val = (T_PointerTypeUnsigned)max_ptr_val;
+ for (T_PointerTypeUnsigned i = counter; i <= max_val; i++) {
+ if (pointer_map.find(i) == pointer_map.end()) {
+ counter = i + 1;
+ return (T_PointerType)i;
+ }
+ }
+ for (T_PointerTypeUnsigned i = 1; i < counter; i++) {
+ if (pointer_map.find(i) == pointer_map.end()) {
+ counter = i + 1;
+ return (T_PointerType)i;
+ }
+ }
+ detail::dynamic_check(false, "Could not find free app pointer slot");
+ return 0;
+ }
+
+public:
+ app_pointer_map()
+ {
+    // ensure we can't use app pointer 0 as this is sometimes confused with
+    // null by the sandbox
+ pointer_map[0] = nullptr;
+ }
+
+ T_PointerType get_app_pointer_idx(void* ptr, T_PointerType max_ptr_val)
+ {
+#ifndef RLBOX_SINGLE_THREADED_INVOCATIONS
+ RLBOX_ACQUIRE_UNIQUE_GUARD(lock, map_mutex);
+#endif
+ T_PointerType idx = get_unused_index(max_ptr_val);
+ T_PointerTypeUnsigned idx_int = (T_PointerTypeUnsigned)idx;
+ pointer_map[idx_int] = ptr;
+ return idx;
+ }
+
+ void remove_app_ptr(T_PointerType idx)
+ {
+#ifndef RLBOX_SINGLE_THREADED_INVOCATIONS
+ RLBOX_ACQUIRE_UNIQUE_GUARD(lock, map_mutex);
+#endif
+ T_PointerTypeUnsigned idx_int = (T_PointerTypeUnsigned)idx;
+ auto it = pointer_map.find(idx_int);
+ detail::dynamic_check(it != pointer_map.end(),
+ "Error: removing a non-existing app pointer");
+ pointer_map.erase(it);
+ }
+
+ void* lookup_index(T_PointerType idx)
+ {
+#ifndef RLBOX_SINGLE_THREADED_INVOCATIONS
+ RLBOX_ACQUIRE_SHARED_GUARD(lock, map_mutex);
+#endif
+ T_PointerTypeUnsigned idx_int = (T_PointerTypeUnsigned)idx;
+ auto it = pointer_map.find(idx_int);
+ detail::dynamic_check(it != pointer_map.end(),
+ "Error: looking up a non-existing app pointer");
+ return it->second;
+ }
+};
+
+} \ No newline at end of file
diff --git a/third_party/rlbox/include/rlbox_conversion.hpp b/third_party/rlbox/include/rlbox_conversion.hpp
new file mode 100644
index 0000000000..e82d0d5da0
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_conversion.hpp
@@ -0,0 +1,273 @@
+#pragma once
+// IWYU pragma: private, include "rlbox.hpp"
+// IWYU pragma: friend "rlbox_.*\.hpp"
+
+#include <array>
+#include <cstring>
+#include <limits>
+#include <type_traits>
+
+#include "rlbox_helpers.hpp"
+#include "rlbox_type_traits.hpp"
+#include "rlbox_types.hpp"
+
+namespace rlbox::detail {
+
+template<typename T_To, typename T_From>
+inline constexpr void convert_type_fundamental(T_To& to,
+ const volatile T_From& from)
+{
+ using namespace std;
+
+ if_constexpr_named(cond1, !is_fundamental_or_enum_v<T_To>)
+ {
+ rlbox_detail_static_fail_because(
+ cond1, "Conversion target should be fundamental or enum type");
+ }
+ else if_constexpr_named(cond2, !is_fundamental_or_enum_v<T_From>)
+ {
+ rlbox_detail_static_fail_because(
+ cond2, "Conversion source should be fundamental or enum type");
+ }
+ else if_constexpr_named(cond3, is_enum_v<T_To> || is_enum_v<T_From>)
+ {
+ static_assert(std::is_same_v<detail::remove_cv_ref_t<T_To>,
+ detail::remove_cv_ref_t<T_From>>);
+ to = from;
+ }
+ else if_constexpr_named(
+ cond4, is_floating_point_v<T_To> || is_floating_point_v<T_From>)
+ {
+ static_assert(is_floating_point_v<T_To> && is_floating_point_v<T_From>);
+ // language coerces different float types
+ to = from;
+ }
+ else if_constexpr_named(cond5, is_integral_v<T_To> || is_integral_v<T_From>)
+ {
+ static_assert(is_integral_v<T_To> && is_integral_v<T_From>);
+
+ const char* err_msg =
+ "Over/Underflow when converting between integer types";
+
+ // Some branches don't use the param
+ RLBOX_UNUSED(err_msg);
+
+ if constexpr (is_signed_v<T_To> == is_signed_v<T_From> &&
+ sizeof(T_To) >= sizeof(T_From)) {
+ // Eg: int64_t from int32_t, uint64_t from uint32_t
+ } else if constexpr (is_unsigned_v<T_To> && is_unsigned_v<T_From>) {
+ // Eg: uint32_t from uint64_t
+ dynamic_check(from <= numeric_limits<T_To>::max(), err_msg);
+ } else if constexpr (is_signed_v<T_To> && is_signed_v<T_From>) {
+ // Eg: int32_t from int64_t
+ dynamic_check(from >= numeric_limits<T_To>::min(), err_msg);
+ dynamic_check(from <= numeric_limits<T_To>::max(), err_msg);
+ } else if constexpr (is_unsigned_v<T_To> && is_signed_v<T_From>) {
+ if constexpr (sizeof(T_To) < sizeof(T_From)) {
+ // Eg: uint32_t from int64_t
+ dynamic_check(from >= 0, err_msg);
+ auto to_max = numeric_limits<T_To>::max();
+ dynamic_check(from <= static_cast<T_From>(to_max), err_msg);
+ } else {
+ // Eg: uint32_t from int32_t, uint64_t from int32_t
+ dynamic_check(from >= 0, err_msg);
+ }
+ } else if constexpr (is_signed_v<T_To> && is_unsigned_v<T_From>) {
+ if constexpr (sizeof(T_To) <= sizeof(T_From)) {
+ // Eg: int32_t from uint32_t, int32_t from uint64_t
+ auto to_max = numeric_limits<T_To>::max();
+ dynamic_check(from <= static_cast<T_From>(to_max), err_msg);
+ } else {
+ // Eg: int64_t from uint32_t
+ }
+ }
+ to = static_cast<T_To>(from);
+ }
+ else
+ {
+ constexpr auto unknownCase = !(cond1 || cond2 || cond3 || cond4 || cond5);
+ rlbox_detail_static_fail_because(
+ unknownCase, "Unexpected case for convert_type_fundamental");
+ }
+}
+
+template<typename T_To, typename T_From>
+inline constexpr void convert_type_fundamental_or_array(T_To& to,
+ const T_From& from)
+{
+ using namespace std;
+
+ using T_To_C = std_array_to_c_arr_t<T_To>;
+ using T_From_C = std_array_to_c_arr_t<T_From>;
+ using T_To_El = remove_all_extents_t<T_To_C>;
+ using T_From_El = remove_all_extents_t<T_From_C>;
+
+ if_constexpr_named(cond1, is_array_v<T_To_C> != is_array_v<T_From_C>)
+ {
+ rlbox_detail_static_fail_because(
+ cond1, "Conversion should not go between array and non array types");
+ }
+ else if constexpr (!is_array_v<T_To_C>)
+ {
+ convert_type_fundamental(to, from);
+ }
+ else if_constexpr_named(cond2, !all_extents_same<T_To_C, T_From_C>)
+ {
+ rlbox_detail_static_fail_because(
+ cond2, "Conversion between arrays should have same dimensions");
+ }
+ else if_constexpr_named(cond3,
+ is_pointer_v<T_To_El> || is_pointer_v<T_From_El>)
+ {
+ rlbox_detail_static_fail_because(cond3,
+ "convert_type_fundamental_or_array "
+ "does not allow arrays of pointers");
+ }
+ else
+ {
+ // Explicitly using size to check for element type as we may be going across
+ // different types of the same width such as void* and uintptr_t
+ if constexpr (sizeof(T_To_El) == sizeof(T_From_El) &&
+ is_signed_v<T_To_El> == is_signed_v<T_From_El>) {
+ // Sanity check - this should definitely be true
+ static_assert(sizeof(T_From_C) == sizeof(T_To_C));
+ std::memcpy(&to, &from, sizeof(T_To_C));
+ } else {
+ for (size_t i = 0; i < std::extent_v<T_To_C>; i++) {
+ convert_type_fundamental_or_array(to[i], from[i]);
+ }
+ }
+ }
+}
+
+enum class adjust_type_direction
+{
+ TO_SANDBOX,
+ TO_APPLICATION,
+ NO_CHANGE
+};
+
+enum class adjust_type_context
+{
+ EXAMPLE,
+ SANDBOX
+};
+
+template<typename T_Sbx,
+ adjust_type_direction Direction,
+ adjust_type_context Context,
+ typename T_To,
+ typename T_From>
+inline constexpr void convert_type_non_class(
+ T_To& to,
+ const T_From& from,
+ const void* example_unsandboxed_ptr,
+ rlbox_sandbox<T_Sbx>* sandbox_ptr)
+{
+ using namespace std;
+
+ // Some branches don't use the param
+ RLBOX_UNUSED(example_unsandboxed_ptr);
+ RLBOX_UNUSED(sandbox_ptr);
+
+ using T_To_C = std_array_to_c_arr_t<T_To>;
+ using T_From_C = std_array_to_c_arr_t<T_From>;
+ using T_To_El = remove_all_extents_t<T_To_C>;
+ using T_From_El = remove_all_extents_t<T_From_C>;
+
+ if constexpr (is_pointer_v<T_To_C> || is_pointer_v<T_From_C>) {
+
+ if constexpr (Direction == adjust_type_direction::NO_CHANGE) {
+
+ static_assert(is_pointer_v<T_To_C> && is_pointer_v<T_From_C> &&
+ sizeof(T_To_C) == sizeof(T_From_C));
+ to = from;
+
+ } else if constexpr (Direction == adjust_type_direction::TO_SANDBOX) {
+
+ static_assert(is_pointer_v<T_From_C>);
+ // Maybe a function pointer, so convert
+ auto from_c = reinterpret_cast<const void*>(from);
+ if constexpr (Context == adjust_type_context::SANDBOX) {
+ RLBOX_DEBUG_ASSERT(sandbox_ptr != nullptr);
+ to = sandbox_ptr->template get_sandboxed_pointer<T_From_C>(from_c);
+ } else {
+ RLBOX_DEBUG_ASSERT(from_c == nullptr ||
+ example_unsandboxed_ptr != nullptr);
+ to =
+ rlbox_sandbox<T_Sbx>::template get_sandboxed_pointer_no_ctx<T_From_C>(
+ from_c, example_unsandboxed_ptr);
+ }
+
+ } else if constexpr (Direction == adjust_type_direction::TO_APPLICATION) {
+
+ static_assert(is_pointer_v<T_To_C>);
+ if constexpr (Context == adjust_type_context::SANDBOX) {
+ RLBOX_DEBUG_ASSERT(sandbox_ptr != nullptr);
+ to = sandbox_ptr->template get_unsandboxed_pointer<T_To_C>(from);
+ } else {
+ RLBOX_DEBUG_ASSERT(from == 0 || example_unsandboxed_ptr != nullptr);
+ to =
+ rlbox_sandbox<T_Sbx>::template get_unsandboxed_pointer_no_ctx<T_To_C>(
+ from, example_unsandboxed_ptr);
+ }
+ }
+
+ } else if constexpr (is_pointer_v<T_To_El> || is_pointer_v<T_From_El>) {
+
+ if constexpr (Direction == adjust_type_direction::NO_CHANGE) {
+ // Sanity check - this should definitely be true
+ static_assert(sizeof(T_To_El) == sizeof(T_From_El) &&
+ sizeof(T_From_C) == sizeof(T_To_C));
+ memcpy(&to, &from, sizeof(T_To_C));
+ } else {
+ for (size_t i = 0; i < std::extent_v<T_To_C>; i++) {
+ convert_type_non_class<T_Sbx, Direction, Context>(
+ to[i], from[i], example_unsandboxed_ptr, sandbox_ptr);
+ }
+ }
+
+ } else {
+ convert_type_fundamental_or_array(to, from);
+ }
+}
+
+// Structs implement their own convert_type by specializing this class
+// Have to do this via a class, as functions can't be partially specialized
+template<typename T_Sbx,
+ adjust_type_direction Direction,
+ adjust_type_context Context,
+ typename T_To,
+ typename T_From>
+class convert_type_class;
+// The specialization implements the following
+// {
+// static inline void run(T_To& to,
+// const T_From& from,
+// const void* example_unsandboxed_ptr);
+// }
+
+template<typename T_Sbx,
+ adjust_type_direction Direction,
+ adjust_type_context Context,
+ typename T_To,
+ typename T_From>
+inline void convert_type(T_To& to,
+ const T_From& from,
+ const void* example_unsandboxed_ptr,
+ rlbox_sandbox<T_Sbx>* sandbox_ptr)
+{
+ if constexpr ((std::is_class_v<T_To> ||
+ std::is_class_v<T_From>)&&!detail::is_std_array_v<T_To> &&
+ !detail::is_std_array_v<T_From>) {
+ // Sanity check
+ static_assert(std::is_class_v<T_From> && std::is_class_v<T_To>);
+ convert_type_class<T_Sbx, Direction, Context, T_To, T_From>::run(
+ to, from, example_unsandboxed_ptr, sandbox_ptr);
+ } else {
+ convert_type_non_class<T_Sbx, Direction, Context>(
+ to, from, example_unsandboxed_ptr, sandbox_ptr);
+ }
+}
+
+} \ No newline at end of file
diff --git a/third_party/rlbox/include/rlbox_dylib_sandbox.hpp b/third_party/rlbox/include/rlbox_dylib_sandbox.hpp
new file mode 100644
index 0000000000..9878674d71
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_dylib_sandbox.hpp
@@ -0,0 +1,314 @@
+#pragma once
+
+#include <cstdint>
+#include <cstdlib>
+#include <mutex>
+#ifndef RLBOX_USE_CUSTOM_SHARED_LOCK
+# include <shared_mutex>
+#endif
+#include <utility>
+
+#if defined(_WIN32)
+// Ensure the min/max macro in the header doesn't collide with functions in
+// std::
+# ifndef NOMINMAX
+# define NOMINMAX
+# endif
+# include <windows.h>
+#else
+# include <dlfcn.h>
+#endif
+
+#include "rlbox_helpers.hpp"
+
+namespace rlbox {
+
+class rlbox_dylib_sandbox;
+
+struct rlbox_dylib_sandbox_thread_data
+{
+ rlbox_dylib_sandbox* sandbox;
+ uint32_t last_callback_invoked;
+};
+
+#ifdef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+
+rlbox_dylib_sandbox_thread_data* get_rlbox_dylib_sandbox_thread_data();
+# define RLBOX_DYLIB_SANDBOX_STATIC_VARIABLES() \
+ thread_local rlbox::rlbox_dylib_sandbox_thread_data \
+ rlbox_dylib_sandbox_thread_info{ 0, 0 }; \
+ namespace rlbox { \
+ rlbox_dylib_sandbox_thread_data* get_rlbox_dylib_sandbox_thread_data() \
+ { \
+ return &rlbox_dylib_sandbox_thread_info; \
+ } \
+ } \
+ static_assert(true, "Enforce semi-colon")
+
+#endif
+
+/**
+ * @brief Class that implements the dylib sandbox. This sandbox doesn't
+ * actually provide any isolation; it loads the library as a regular dynamic
+ * library and only serves as a stepping stone towards migrating an
+ * application to use the RLBox API.
+ */
+class rlbox_dylib_sandbox
+{
+public:
+ // Stick with the system defaults
+ using T_LongLongType = long long;
+ using T_LongType = long;
+ using T_IntType = int;
+ using T_PointerType = void*;
+ using T_ShortType = short;
+  // The dylib sandbox can transfer buffers as there is no sandboxing.
+  // Thus transfer is a noop
+ using can_grant_deny_access = void;
+ // if this plugin uses a separate function to lookup internal callbacks
+ using needs_internal_lookup_symbol = void;
+
+private:
+ void* sandbox = nullptr;
+
+ RLBOX_SHARED_LOCK(callback_mutex);
+ static inline const uint32_t MAX_CALLBACKS = 64;
+ void* callback_unique_keys[MAX_CALLBACKS]{ 0 };
+ void* callbacks[MAX_CALLBACKS]{ 0 };
+
+#ifndef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+ thread_local static inline rlbox_dylib_sandbox_thread_data thread_data{ 0,
+ 0 };
+#endif
+
+ template<uint32_t N, typename T_Ret, typename... T_Args>
+ static T_Ret callback_trampoline(T_Args... params)
+ {
+#ifdef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+ auto& thread_data = *get_rlbox_dylib_sandbox_thread_data();
+#endif
+ thread_data.last_callback_invoked = N;
+ using T_Func = T_Ret (*)(T_Args...);
+ T_Func func;
+ {
+#ifndef RLBOX_SINGLE_THREADED_INVOCATIONS
+ RLBOX_ACQUIRE_SHARED_GUARD(lock, thread_data.sandbox->callback_mutex);
+#endif
+ func = reinterpret_cast<T_Func>(thread_data.sandbox->callbacks[N]);
+ }
+ // Callbacks are invoked through function pointers, cannot use std::forward
+ // as we don't have caller context for T_Args, which means they are all
+ // effectively passed by value
+ return func(params...);
+ }
+
+protected:
+#if defined(_WIN32)
+ using path_buf = const LPCWSTR;
+#else
+ using path_buf = const char*;
+#endif
+
+ inline void impl_create_sandbox(path_buf path)
+ {
+#if defined(_WIN32)
+ sandbox = (void*)LoadLibraryW(path);
+#else
+ sandbox = dlopen(path, RTLD_LAZY | RTLD_LOCAL);
+#endif
+
+ if (!sandbox) {
+ std::string error_msg = "Could not load dynamic library: ";
+#if defined(_WIN32)
+ DWORD errorMessageID = GetLastError();
+ if (errorMessageID != 0) {
+ LPSTR messageBuffer = nullptr;
+ // The api creates the buffer that holds the message
+ size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ errorMessageID,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPSTR)&messageBuffer,
+ 0,
+ NULL);
+ // Copy the error message into a std::string.
+ std::string message(messageBuffer, size);
+ error_msg += message;
+ LocalFree(messageBuffer);
+ }
+#else
+ error_msg += dlerror();
+#endif
+ detail::dynamic_check(false, error_msg.c_str());
+ }
+ }
+
+ inline void impl_destroy_sandbox()
+ {
+#if defined(_WIN32)
+ FreeLibrary((HMODULE)sandbox);
+#else
+ dlclose(sandbox);
+#endif
+ sandbox = nullptr;
+ }
+
+ template<typename T>
+ inline void* impl_get_unsandboxed_pointer(T_PointerType p) const
+ {
+ return p;
+ }
+
+ template<typename T>
+ inline T_PointerType impl_get_sandboxed_pointer(const void* p) const
+ {
+ return const_cast<T_PointerType>(p);
+ }
+
+ template<typename T>
+ static inline void* impl_get_unsandboxed_pointer_no_ctx(
+ T_PointerType p,
+ const void* /* example_unsandboxed_ptr */,
+ rlbox_dylib_sandbox* (* // Func ptr
+ /* param: expensive_sandbox_finder */)(
+ const void* example_unsandboxed_ptr))
+ {
+ return p;
+ }
+
+ template<typename T>
+ static inline T_PointerType impl_get_sandboxed_pointer_no_ctx(
+ const void* p,
+ const void* /* example_unsandboxed_ptr */,
+ rlbox_dylib_sandbox* (* // Func ptr
+ /* param: expensive_sandbox_finder */)(
+ const void* example_unsandboxed_ptr))
+ {
+ return const_cast<T_PointerType>(p);
+ }
+
+ inline T_PointerType impl_malloc_in_sandbox(size_t size)
+ {
+ void* p = malloc(size);
+ return p;
+ }
+
+ inline void impl_free_in_sandbox(T_PointerType p) { free(p); }
+
+ static inline bool impl_is_in_same_sandbox(const void*, const void*)
+ {
+ return true;
+ }
+
+ inline bool impl_is_pointer_in_sandbox_memory(const void*) { return true; }
+ inline bool impl_is_pointer_in_app_memory(const void*) { return true; }
+
+ inline size_t impl_get_total_memory()
+ {
+ return std::numeric_limits<size_t>::max();
+ }
+
+ inline void* impl_get_memory_location()
+ {
+ // There isn't any sandbox memory for the dylib_sandbox as we just redirect
+ // to the app. Also, this is mostly used for pointer swizzling or sandbox
+ // bounds checks which is also not present/not required. So we can just
+ // return null
+ return nullptr;
+ }
+
+ void* impl_lookup_symbol(const char* func_name)
+ {
+#if defined(_WIN32)
+ void* ret = GetProcAddress((HMODULE)sandbox, func_name);
+#else
+ void* ret = dlsym(sandbox, func_name);
+#endif
+ detail::dynamic_check(ret != nullptr, "Symbol not found");
+ return ret;
+ }
+
+ void* impl_internal_lookup_symbol(const char* func_name)
+ {
+ return impl_lookup_symbol(func_name);
+ }
+
+ template<typename T, typename T_Converted, typename... T_Args>
+ auto impl_invoke_with_func_ptr(T_Converted* func_ptr, T_Args&&... params)
+ {
+#ifdef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+ auto& thread_data = *get_rlbox_dylib_sandbox_thread_data();
+#endif
+ auto old_sandbox = thread_data.sandbox;
+ thread_data.sandbox = this;
+ auto on_exit = detail::make_scope_exit([&] {
+ thread_data.sandbox = old_sandbox;
+ });
+ return (*func_ptr)(params...);
+ }
+
+ template<typename T_Ret, typename... T_Args>
+ inline T_PointerType impl_register_callback(void* key, void* callback)
+ {
+ RLBOX_ACQUIRE_UNIQUE_GUARD(lock, callback_mutex);
+
+ void* chosen_trampoline = nullptr;
+
+    // need a compile time for loop as we need I to be a compile time value
+ // this is because we are returning the I'th callback trampoline
+ detail::compile_time_for<MAX_CALLBACKS>([&](auto I) {
+ if (!chosen_trampoline && callback_unique_keys[I.value] == nullptr) {
+ callback_unique_keys[I.value] = key;
+ callbacks[I.value] = callback;
+ chosen_trampoline = reinterpret_cast<void*>(
+ callback_trampoline<I.value, T_Ret, T_Args...>);
+ }
+ });
+
+ return reinterpret_cast<T_PointerType>(chosen_trampoline);
+ }
+
+ static inline std::pair<rlbox_dylib_sandbox*, void*>
+ impl_get_executed_callback_sandbox_and_key()
+ {
+#ifdef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+ auto& thread_data = *get_rlbox_dylib_sandbox_thread_data();
+#endif
+ auto sandbox = thread_data.sandbox;
+ auto callback_num = thread_data.last_callback_invoked;
+ void* key = sandbox->callback_unique_keys[callback_num];
+ return std::make_pair(sandbox, key);
+ }
+
+ template<typename T_Ret, typename... T_Args>
+ inline void impl_unregister_callback(void* key)
+ {
+ RLBOX_ACQUIRE_UNIQUE_GUARD(lock, callback_mutex);
+ for (uint32_t i = 0; i < MAX_CALLBACKS; i++) {
+ if (callback_unique_keys[i] == key) {
+ callback_unique_keys[i] = nullptr;
+ callbacks[i] = nullptr;
+ break;
+ }
+ }
+ }
+
+ template<typename T>
+ inline T* impl_grant_access(T* src, size_t num, bool& success)
+ {
+ RLBOX_UNUSED(num);
+ success = true;
+ return src;
+ }
+
+ template<typename T>
+ inline T* impl_deny_access(T* src, size_t num, bool& success)
+ {
+ RLBOX_UNUSED(num);
+ success = true;
+ return src;
+ }
+};
+
+}
diff --git a/third_party/rlbox/include/rlbox_helpers.hpp b/third_party/rlbox/include/rlbox_helpers.hpp
new file mode 100644
index 0000000000..04c3294693
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_helpers.hpp
@@ -0,0 +1,216 @@
+#pragma once
+// IWYU pragma: private, include "rlbox.hpp"
+// IWYU pragma: friend "rlbox_.*\.hpp"
+
+#include <cstdlib>
+#include <iostream>
+#include <stdexcept>
+#include <type_traits>
+#include <utility>
+#ifndef RLBOX_USE_CUSTOM_SHARED_LOCK
+# include <mutex>
+#endif
+
+#include "rlbox_stdlib_polyfill.hpp"
+
+namespace rlbox {
+namespace detail {
+ // Error code associated with RLBox compile-time check failures.
+ // NOTE(review): not referenced within this header — confirm external users.
+ const int CompileErrorCode = 42;
+
+ // Runtime invariant check: when `check` is false, reports `msg` and stops.
+ // Failure behavior is configurable: throws std::runtime_error when built
+ // with exceptions and RLBOX_USE_EXCEPTIONS, else calls RLBOX_CUSTOM_ABORT
+ // if the embedder defined one, else writes to stderr and aborts.
+ inline void dynamic_check(bool check, const char* const msg)
+ {
+ // clang-format off
+ if (!check) {
+ #if __cpp_exceptions && defined(RLBOX_USE_EXCEPTIONS)
+ throw std::runtime_error(msg);
+ #else
+ #ifdef RLBOX_CUSTOM_ABORT
+ RLBOX_CUSTOM_ABORT(msg);
+ #else
+ std::cerr << msg << std::endl;
+ std::abort();
+ #endif
+ #endif
+ }
+ // clang-format on
+ }
+
+// Compile-time failure helper. Normally expands to static_assert(!CondExpr);
+// with RLBOX_NO_COMPILE_CHECKS the failure is deferred to runtime (throw or
+// abort) so code that would trip the assert still compiles.
+#ifdef RLBOX_NO_COMPILE_CHECKS
+# if __cpp_exceptions && defined(RLBOX_USE_EXCEPTIONS)
+# define rlbox_detail_static_fail_because(CondExpr, Message) \
+ ((void)(CondExpr)), throw std::runtime_error(Message)
+# else
+# define rlbox_detail_static_fail_because(CondExpr, Message) abort()
+# endif
+#else
+# define rlbox_detail_static_fail_because(CondExpr, Message) \
+ static_assert(!(CondExpr), Message)
+#endif
+
+// Assertion compiled in only when RLBOX_ENABLE_DEBUG_ASSERTIONS is defined;
+// otherwise a no-op.
+#ifdef RLBOX_ENABLE_DEBUG_ASSERTIONS
+# define RLBOX_DEBUG_ASSERT(...) \
+ ::rlbox::detail::dynamic_check(__VA_ARGS__, "Debug assertion failed")
+#else
+# define RLBOX_DEBUG_ASSERT(...) (void)0
+#endif
+
+// Silences unused-variable/parameter warnings.
+#define RLBOX_UNUSED(...) (void)__VA_ARGS__
+
+// Forces a trailing semicolon at a macro's use site.
+#define RLBOX_REQUIRE_SEMI_COLON static_assert(true)
+
+// `if constexpr` that also binds the condition to a named constexpr variable,
+// so the branch body can reference it (e.g. in static_assert messages).
+#define if_constexpr_named(varName, ...) \
+ if constexpr (constexpr auto varName = __VA_ARGS__; varName)
+
+ // Debug aid: prints the instantiated template parameter list by emitting
+ // the compiler's pretty function signature (compiler-specific macro).
+ template<typename... TArgs>
+ void printTypes()
+ {
+#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
+ std::cout << __PRETTY_FUNCTION__ << std::endl; // NOLINT
+#elif defined(_MSC_VER)
+ std::cout << __FUNCSIG__ << std::endl; // NOLINT
+#else
+ std::cout << "Unsupported" << std::endl;
+#endif
+ }
+
+// Create an extension point so applications can provide their own shared lock
+// implementation
+// Defaults use std::shared_timed_mutex as a reader/writer lock. When
+// RLBOX_USE_CUSTOM_SHARED_LOCK is defined, all three macros must be supplied
+// together by the embedder.
+#ifndef RLBOX_USE_CUSTOM_SHARED_LOCK
+# define RLBOX_SHARED_LOCK(name) std::shared_timed_mutex name
+# define RLBOX_ACQUIRE_SHARED_GUARD(name, ...) \
+ std::shared_lock<std::shared_timed_mutex> name(__VA_ARGS__)
+# define RLBOX_ACQUIRE_UNIQUE_GUARD(name, ...) \
+ std::unique_lock<std::shared_timed_mutex> name(__VA_ARGS__)
+#else
+# if !defined(RLBOX_SHARED_LOCK) || !defined(RLBOX_ACQUIRE_SHARED_GUARD) || \
+ !defined(RLBOX_ACQUIRE_UNIQUE_GUARD)
+# error \
+ "RLBOX_USE_CUSTOM_SHARED_LOCK defined but missing definitions for RLBOX_SHARED_LOCK, RLBOX_ACQUIRE_SHARED_GUARD, RLBOX_ACQUIRE_UNIQUE_GUARD"
+# endif
+#endif
+
+// Defines an operator `opSymbol` that casts `this` to the base class named by
+// the variadic arguments (variadic so template names containing commas work)
+// and forwards the operation to it.
+#define rlbox_detail_forward_binop_to_base(opSymbol, ...) \
+ template<typename T_Rhs> \
+ inline auto operator opSymbol(T_Rhs rhs) \
+ { \
+ auto b = static_cast<__VA_ARGS__*>(this); \
+ return (*b)opSymbol rhs; \
+ } \
+ RLBOX_REQUIRE_SEMI_COLON
+
+ // Casts away `volatile` from the pointee type of `ptr`, e.g. when handing
+ // sandbox-backed memory to APIs that lack volatile overloads.
+ template<typename T>
+ inline auto remove_volatile_from_ptr_cast(T* ptr)
+ {
+ using T_Result = std::add_pointer_t<std::remove_volatile_t<T>>;
+ return const_cast<T_Result>(ptr);
+ }
+
+ // https://stackoverflow.com/questions/37602057/why-isnt-a-for-loop-a-compile-time-expression
+ namespace compile_time_for_detail {
+ // Carries index N as a type so the loop body can use it in constant
+ // expressions via I.value.
+ template<std::size_t N>
+ struct num
+ {
+ static const constexpr auto value = N;
+ };
+
+ // Expands to func(num<0>{}), func(num<1>{}), ... via a fold expression.
+ template<class F, std::size_t... Is>
+ inline void compile_time_for_helper(F func, std::index_sequence<Is...>)
+ {
+ (func(num<Is>{}), ...);
+ }
+ }
+
+ // Compile-time "for" loop: invokes func with num<0> .. num<N-1>, each call
+ // receiving its index as a compile-time constant.
+ template<std::size_t N, typename F>
+ inline void compile_time_for(F func)
+ {
+ compile_time_for_detail::compile_time_for_helper(
+ func, std::make_index_sequence<N>());
+ }
+
+ // Runs first_task, then second_task, and returns first_task's result (or
+ // nothing when it returns void). Lets bookkeeping/cleanup run after the
+ // value to be returned has already been computed.
+ template<typename T, typename T2>
+ [[nodiscard]] inline auto return_first_result(T first_task, T2 second_task)
+ {
+ using T_Result = rlbox::detail::polyfill::invoke_result_t<T>;
+
+ if constexpr (std::is_void_v<T_Result>) {
+ first_task();
+ second_task();
+ } else {
+ auto val = first_task();
+ second_task();
+ return val;
+ }
+ }
+
+ // Scope Exit guards
+ // RAII guard that runs `exit_func` when it goes out of scope unless
+ // release() was called. Move-only; moving transfers responsibility for
+ // running the cleanup to the destination guard.
+ template<typename T_ExitFunc>
+ class scope_exit
+ {
+ T_ExitFunc exit_func;
+ // NOTE(review): `released` is true while the guard is still armed (i.e.
+ // exit_func WILL run) and release() sets it to false — the name reads
+ // inverted relative to its meaning.
+ bool released;
+
+ public:
+ explicit scope_exit(T_ExitFunc&& cleanup)
+ : exit_func(cleanup)
+ , released(true)
+ {}
+
+ // Move disarms the source so the cleanup runs exactly once.
+ // NOTE(review): not marked noexcept — confirm no caller stores guards in
+ // containers that rely on noexcept moves.
+ scope_exit(scope_exit&& rhs)
+ : exit_func(std::move(rhs.exit_func))
+ , released(rhs.released)
+ {
+ rhs.release();
+ }
+
+ ~scope_exit()
+ {
+ if (released) {
+ exit_func();
+ }
+ }
+
+ // Disarms the guard: the cleanup will not run on destruction.
+ void release() { released = false; }
+
+ private:
+ explicit scope_exit(const scope_exit&) = delete;
+ scope_exit& operator=(const scope_exit&) = delete;
+ scope_exit& operator=(scope_exit&&) = delete;
+ };
+
+ // Factory for scope_exit, deducing the callable type.
+ // NOTE(review): takes a forwarding reference but passes it on with
+ // std::move rather than std::forward — fine for the rvalue lambdas used in
+ // this codebase, but worth confirming no lvalue callers exist.
+ template<typename T_ExitFunc>
+ [[nodiscard]] scope_exit<T_ExitFunc> make_scope_exit(
+ T_ExitFunc&& exitFunction)
+ {
+ return scope_exit<T_ExitFunc>(std::move(exitFunction));
+ }
+
+/*
+Make sure classes can access the private members of tainted<T1> and
+tainted_volatile. Ideally, this should be
+
+template <typename U1>
+friend class tainted<U1, T_Sandbox>;
+
+But C++ doesn't seem to allow the above.
+*/
+// Grants friendship to every RLBox wrapper template so they can reach each
+// other's private state (see the rationale in the comment above).
+#define KEEP_CLASSES_FRIENDLY \
+ template<template<typename, typename> typename U1, typename U2, typename U3> \
+ friend class tainted_base_impl; \
+ \
+ template<typename U1, typename U2> \
+ friend class tainted; \
+ \
+ template<typename U1, typename U2> \
+ friend class tainted_volatile; \
+ \
+ template<typename U1> \
+ friend class rlbox_sandbox; \
+ \
+ template<typename U1, typename U2> \
+ friend class sandbox_callback; \
+ \
+ template<typename U1, typename U2> \
+ friend class app_pointer;
+}
+
+}
diff --git a/third_party/rlbox/include/rlbox_noop_sandbox.hpp b/third_party/rlbox/include/rlbox_noop_sandbox.hpp
new file mode 100644
index 0000000000..d1e48edb4f
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_noop_sandbox.hpp
@@ -0,0 +1,254 @@
+#pragma once
+
+#include <cstdint>
+#include <cstdlib>
+#include <mutex>
+#ifndef RLBOX_USE_CUSTOM_SHARED_LOCK
+# include <shared_mutex>
+#endif
+#include <utility>
+
+#include "rlbox_helpers.hpp"
+
+namespace rlbox {
+
+class rlbox_noop_sandbox;
+
+// Per-thread bookkeeping: which sandbox made the most recent invocation on
+// this thread, and the index of the last callback slot invoked.
+struct rlbox_noop_sandbox_thread_data
+{
+ rlbox_noop_sandbox* sandbox;
+ uint32_t last_callback_invoked;
+};
+
+#ifdef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+
+// When the embedder provides the TLS storage, it must place
+// RLBOX_NOOP_SANDBOX_STATIC_VARIABLES() in exactly one translation unit;
+// RLBox then reads the per-thread data through this getter.
+rlbox_noop_sandbox_thread_data* get_rlbox_noop_sandbox_thread_data();
+# define RLBOX_NOOP_SANDBOX_STATIC_VARIABLES() \
+ thread_local rlbox::rlbox_noop_sandbox_thread_data \
+ rlbox_noop_sandbox_thread_info{ 0, 0 }; \
+ namespace rlbox { \
+ rlbox_noop_sandbox_thread_data* get_rlbox_noop_sandbox_thread_data() \
+ { \
+ return &rlbox_noop_sandbox_thread_info; \
+ } \
+ } \
+ static_assert(true, "Enforce semi-colon")
+
+#endif
+
+/**
+ * @brief Class that implements the null sandbox. This sandbox doesn't actually
+ * provide any isolation and only serves as a stepping stone towards migrating
+ * an application to use the RLBox API.
+ */
+class rlbox_noop_sandbox
+{
+public:
+ // Stick with the system defaults
+ using T_LongLongType = long long;
+ using T_LongType = long;
+ using T_IntType = int;
+ using T_PointerType = void*;
+ using T_ShortType = short;
+ // no-op sandbox can transfer buffers as there is no sandboxing
+ // Thus transfer is a noop
+ using can_grant_deny_access = void;
+
+private:
+ // Guards the two parallel callback tables below.
+ RLBOX_SHARED_LOCK(callback_mutex);
+ static inline const uint32_t MAX_CALLBACKS = 64;
+ // Slot i holds the registration key and host function pointer for the
+ // callback bound to callback_trampoline<i, ...>; nullptr means free.
+ void* callback_unique_keys[MAX_CALLBACKS]{ 0 };
+ void* callbacks[MAX_CALLBACKS]{ 0 };
+
+#ifndef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+ // Default thread-local bookkeeping; embedders may instead supply storage
+ // via RLBOX_NOOP_SANDBOX_STATIC_VARIABLES().
+ thread_local static inline rlbox_noop_sandbox_thread_data thread_data{ 0, 0 };
+#endif
+
+ // Entry point for callback slot N: records which slot ran on this thread,
+ // looks up the registered host function under the shared lock, and
+ // forwards the call to it.
+ template<uint32_t N, typename T_Ret, typename... T_Args>
+ static T_Ret callback_trampoline(T_Args... params)
+ {
+#ifdef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+ auto& thread_data = *get_rlbox_noop_sandbox_thread_data();
+#endif
+ thread_data.last_callback_invoked = N;
+ using T_Func = T_Ret (*)(T_Args...);
+ T_Func func;
+ {
+#ifndef RLBOX_SINGLE_THREADED_INVOCATIONS
+ RLBOX_ACQUIRE_SHARED_GUARD(lock, thread_data.sandbox->callback_mutex);
+#endif
+ func = reinterpret_cast<T_Func>(thread_data.sandbox->callbacks[N]);
+ }
+ // Callbacks are invoked through function pointers, cannot use std::forward
+ // as we don't have caller context for T_Args, which means they are all
+ // effectively passed by value
+ return func(params...);
+ }
+
+protected:
+ // No isolation to set up or tear down for the null sandbox.
+ inline void impl_create_sandbox() {}
+
+ inline void impl_destroy_sandbox() {}
+
+ // Pointers are shared between "sandbox" and app, so swizzling in either
+ // direction is the identity (modulo const adjustment).
+ template<typename T>
+ inline void* impl_get_unsandboxed_pointer(T_PointerType p) const
+ {
+ return p;
+ }
+
+ template<typename T>
+ inline T_PointerType impl_get_sandboxed_pointer(const void* p) const
+ {
+ return const_cast<T_PointerType>(p);
+ }
+
+ // Context-free variants: the example pointer and sandbox-finder callback
+ // are ignored since no per-sandbox state is needed to swizzle.
+ template<typename T>
+ static inline void* impl_get_unsandboxed_pointer_no_ctx(
+ T_PointerType p,
+ const void* /* example_unsandboxed_ptr */,
+ rlbox_noop_sandbox* (* // Func ptr
+ /* param: expensive_sandbox_finder */)(
+ const void* example_unsandboxed_ptr))
+ {
+ return p;
+ }
+
+ template<typename T>
+ static inline T_PointerType impl_get_sandboxed_pointer_no_ctx(
+ const void* p,
+ const void* /* example_unsandboxed_ptr */,
+ rlbox_noop_sandbox* (* // Func ptr
+ /* param: expensive_sandbox_finder */)(
+ const void* example_unsandboxed_ptr))
+ {
+ return const_cast<T_PointerType>(p);
+ }
+
+ // The sandbox "heap" is simply the host heap.
+ inline T_PointerType impl_malloc_in_sandbox(size_t size)
+ {
+ void* p = malloc(size);
+ return p;
+ }
+
+ inline void impl_free_in_sandbox(T_PointerType p) { free(p); }
+
+ // Everything shares one address space, so all pointers are "inside".
+ static inline bool impl_is_in_same_sandbox(const void*, const void*)
+ {
+ return true;
+ }
+
+ inline bool impl_is_pointer_in_sandbox_memory(const void*) { return true; }
+ inline bool impl_is_pointer_in_app_memory(const void*) { return true; }
+
+ inline size_t impl_get_total_memory()
+ {
+ return std::numeric_limits<size_t>::max();
+ }
+
+ inline void* impl_get_memory_location()
+ {
+ // There isn't any sandbox memory for the noop_sandbox as we just redirect
+ // to the app. Also, this is mostly used for pointer swizzling or sandbox
+ // bounds checks which is also not present/not required. So we can just
+ // return null
+ return nullptr;
+ }
+
+ // adding a template so that we can use static_assert to fire only if this
+ // function is invoked
+ template<typename T = void>
+ void* impl_lookup_symbol(const char* /* func_name */)
+ {
+ // Will fire if this impl_lookup_symbol is ever called for the static
+ // sandbox
+ constexpr bool fail = std::is_same_v<T, void>;
+ rlbox_detail_static_fail_because(
+ fail,
+ "The no_op_sandbox uses static calls and thus developers should add\n\n"
+ "#define RLBOX_USE_STATIC_CALLS() rlbox_noop_sandbox_lookup_symbol\n\n"
+ "to their code, to ensure that static calls are handled correctly.");
+
+ return nullptr;
+ }
+
+// Static-call hook: "symbol lookup" is just taking the host function's
+// address.
+#define rlbox_noop_sandbox_lookup_symbol(func_name) \
+ reinterpret_cast<void*>(&func_name) /* NOLINT */
+
+ // Calls the library function directly. Records this sandbox in the
+ // thread-local data for the duration of the call (so re-entrant callbacks
+ // can find it) and restores the previous value via a scope guard.
+ template<typename T, typename T_Converted, typename... T_Args>
+ auto impl_invoke_with_func_ptr(T_Converted* func_ptr, T_Args&&... params)
+ {
+#ifdef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+ auto& thread_data = *get_rlbox_noop_sandbox_thread_data();
+#endif
+ auto old_sandbox = thread_data.sandbox;
+ thread_data.sandbox = this;
+ auto on_exit = detail::make_scope_exit([&] {
+ thread_data.sandbox = old_sandbox;
+ });
+ return (*func_ptr)(params...);
+ }
+
+ // Registers a host callback in the first free slot and returns the
+ // trampoline bound to that slot; nullptr if all slots are in use.
+ template<typename T_Ret, typename... T_Args>
+ inline T_PointerType impl_register_callback(void* key, void* callback)
+ {
+ RLBOX_ACQUIRE_UNIQUE_GUARD(lock, callback_mutex);
+
+ void* chosen_trampoline = nullptr;
+
+ // need a compile time for loop as we need I to be a compile time value
+ // this is because we are returning the I'th callback trampoline
+ detail::compile_time_for<MAX_CALLBACKS>([&](auto I) {
+ if (!chosen_trampoline && callback_unique_keys[I.value] == nullptr) {
+ callback_unique_keys[I.value] = key;
+ callbacks[I.value] = callback;
+ chosen_trampoline = reinterpret_cast<void*>(
+ callback_trampoline<I.value, T_Ret, T_Args...>);
+ }
+ });
+
+ return reinterpret_cast<T_PointerType>(chosen_trampoline);
+ }
+
+ // Reports which sandbox/callback-key pair is currently executing on this
+ // thread (recorded by callback_trampoline).
+ static inline std::pair<rlbox_noop_sandbox*, void*>
+ impl_get_executed_callback_sandbox_and_key()
+ {
+#ifdef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+ auto& thread_data = *get_rlbox_noop_sandbox_thread_data();
+#endif
+ auto sandbox = thread_data.sandbox;
+ auto callback_num = thread_data.last_callback_invoked;
+ void* key = sandbox->callback_unique_keys[callback_num];
+ return std::make_pair(sandbox, key);
+ }
+
+ // Frees the slot registered under `key`; no-op for unknown keys.
+ template<typename T_Ret, typename... T_Args>
+ inline void impl_unregister_callback(void* key)
+ {
+ RLBOX_ACQUIRE_UNIQUE_GUARD(lock, callback_mutex);
+ for (uint32_t i = 0; i < MAX_CALLBACKS; i++) {
+ if (callback_unique_keys[i] == key) {
+ callback_unique_keys[i] = nullptr;
+ callbacks[i] = nullptr;
+ break;
+ }
+ }
+ }
+
+ // Grant/deny access are no-ops: memory is always shared with the app.
+ template<typename T>
+ inline T* impl_grant_access(T* src, size_t num, bool& success)
+ {
+ RLBOX_UNUSED(num);
+ success = true;
+ return src;
+ }
+
+ template<typename T>
+ inline T* impl_deny_access(T* src, size_t num, bool& success)
+ {
+ RLBOX_UNUSED(num);
+ success = true;
+ return src;
+ }
+};
+
+}
diff --git a/third_party/rlbox/include/rlbox_policy_types.hpp b/third_party/rlbox/include/rlbox_policy_types.hpp
new file mode 100644
index 0000000000..b5530dedcc
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_policy_types.hpp
@@ -0,0 +1,387 @@
+#pragma once
+// IWYU pragma: private, include "rlbox.hpp"
+// IWYU pragma: friend "rlbox_.*\.hpp"
+
+#include <type_traits>
+#include <utility>
+
+#include "rlbox_helpers.hpp"
+#include "rlbox_struct_support.hpp"
+#include "rlbox_types.hpp"
+
+namespace rlbox {
+
+namespace callback_detail {
+
+ // Compute the expected type of the callback
+ // (host-facing signature: sandbox reference plus tainted arguments,
+ // returning a tainted value, or void for void returns).
+ template<typename T_Sbx, typename T_Ret, typename... T_Args>
+ using T_Cb =
+ std::conditional_t<std::is_void_v<T_Ret>, void, tainted<T_Ret, T_Sbx>> (*)(
+ rlbox_sandbox<T_Sbx>&,
+ tainted<T_Args, T_Sbx>...);
+
+ // Declared-only helper: maps a raw function-pointer type to T_Cb.
+ template<typename T_Sbx, typename T_Ret, typename... T_Args>
+ T_Cb<T_Sbx, T_Ret, T_Args...> callback_type_helper(T_Ret (*)(T_Args...));
+
+ // Compute the expected type of the interceptor
+ // (same signature but with every type converted to its sandbox ABI
+ // equivalent).
+ template<typename T_Sbx, typename T_Ret, typename... T_Args>
+ using T_I = detail::convert_to_sandbox_equivalent_t<T_Ret, T_Sbx> (*)(
+ detail::convert_to_sandbox_equivalent_t<T_Args, T_Sbx>...);
+
+ // Declared-only helper: maps a raw function-pointer type to T_I.
+ template<typename T_Sbx, typename T_Ret, typename... T_Args>
+ T_I<T_Sbx, T_Ret, T_Args...> interceptor_type_helper(T_Ret (*)(T_Args...));
+}
+
+// Move-only RAII handle for a callback registered with a sandbox; the
+// callback is unregistered on destruction (or via unregister()).
+template<typename T, typename T_Sbx>
+class sandbox_callback
+{
+ KEEP_CLASSES_FRIENDLY
+
+private:
+ // Owning sandbox; nullptr once unregistered or moved-from.
+ rlbox_sandbox<T_Sbx>* sandbox;
+
+ // Host-side callback signature (tainted arguments/return).
+ using T_Callback =
+ decltype(callback_detail::callback_type_helper<T_Sbx>(std::declval<T>()));
+ T_Callback callback;
+
+ // The interceptor is the function that runs between the sandbox invoking the
+ // callback and the actual callback running The interceptor is responsible for
+ // wrapping and converting callback arguments, returns etc. to their
+ // appropriate representations
+ using T_Interceptor =
+ decltype(callback_detail::interceptor_type_helper<T_Sbx>(
+ std::declval<T>()));
+ T_Interceptor callback_interceptor;
+
+ // The trampoline is the internal sandbox representation of the callback
+ // Depending on the sandbox type, this could be the callback pointer directly
+ // or a trampoline function that gates exits from the sandbox.
+ using T_Trampoline = detail::convert_to_sandbox_equivalent_t<T, T_Sbx>;
+ T_Trampoline callback_trampoline;
+
+ // The unique key representing the callback to pass to unregister_callback on
+ // destruction
+ void* key;
+
+ // Transfers all state from `other`, leaving it in the empty
+ // (unregistered) state.
+ inline void move_obj(sandbox_callback&& other)
+ {
+ sandbox = other.sandbox;
+ callback = other.callback;
+ callback_interceptor = other.callback_interceptor;
+ callback_trampoline = other.callback_trampoline;
+ key = other.key;
+ other.sandbox = nullptr;
+ other.callback = nullptr;
+ other.callback_interceptor = nullptr;
+ other.callback_trampoline = 0;
+ other.key = nullptr;
+ }
+
+ // Unregisters from the owning sandbox (if still registered) and resets
+ // every member to the empty state. The unused function-pointer parameter
+ // only carries the T_Ret/T_Args deduction.
+ template<typename T_Ret, typename... T_Args>
+ inline void unregister_helper(T_Ret (*)(T_Args...))
+ {
+ if (callback != nullptr) {
+ // Don't need to worry about race between unregister and move as
+ // 1) this will not happen in a correctly written program
+ // 2) if this does happen, the worst that can happen is an invocation of a
+ // null function pointer, which causes a crash that cannot be exploited
+ // for RCE
+ sandbox->template unregister_callback<T_Ret, T_Args...>(key);
+ sandbox = nullptr;
+ callback = nullptr;
+ callback_interceptor = nullptr;
+ callback_trampoline = 0;
+ key = nullptr;
+ }
+ }
+
+ inline T_Callback get_raw_value() const noexcept { return callback; }
+ inline T_Trampoline get_raw_sandbox_value() const noexcept
+ {
+ return callback_trampoline;
+ }
+
+ // Keep constructor private as only rlbox_sandbox should be able to create
+ // this object
+ sandbox_callback(rlbox_sandbox<T_Sbx>* p_sandbox,
+ T_Callback p_callback,
+ T_Interceptor p_callback_interceptor,
+ T_Trampoline p_callback_trampoline,
+ void* p_key)
+ : sandbox(p_sandbox)
+ , callback(p_callback)
+ , callback_interceptor(p_callback_interceptor)
+ , callback_trampoline(p_callback_trampoline)
+ , key(p_key)
+ {
+ detail::dynamic_check(sandbox != nullptr,
+ "Unexpected null sandbox when creating a callback");
+ }
+
+public:
+ // Default-constructed handles are empty (is_unregistered() == true).
+ sandbox_callback()
+ : sandbox(nullptr)
+ , callback(nullptr)
+ , callback_interceptor(nullptr)
+ , callback_trampoline(0)
+ , key(nullptr)
+ {}
+
+ sandbox_callback(sandbox_callback&& other)
+ {
+ move_obj(std::forward<sandbox_callback>(other));
+ }
+
+ inline sandbox_callback& operator=(sandbox_callback&& other)
+ {
+ if (this != &other) {
+ move_obj(std::forward<sandbox_callback>(other));
+ }
+ return *this;
+ }
+
+ // Explicitly unregister the callback; safe to call on an empty handle.
+ void unregister()
+ {
+ T dummy = nullptr;
+ unregister_helper(dummy);
+ }
+
+ ~sandbox_callback() { unregister(); }
+
+ /**
+ * @brief Check if callback is _not_ registered.
+ */
+ inline bool is_unregistered() const noexcept
+ {
+ return get_raw_value() == nullptr;
+ }
+
+ /**
+ * @brief Unwrap a callback without verification. This is an unsafe operation
+ * and should be used with care.
+ */
+ inline auto UNSAFE_unverified() const noexcept { return get_raw_value(); }
+ /**
+ * @brief Like UNSAFE_unverified, but get the underlying sandbox
+ * representation.
+ *
+ * @param sandbox Reference to sandbox.
+ */
+ inline auto UNSAFE_sandboxed(rlbox_sandbox<T_Sbx>& sandbox) const noexcept
+ {
+ RLBOX_UNUSED(sandbox);
+ return get_raw_sandbox_value();
+ }
+};
+
+// Move-only RAII handle for an app pointer registered in a sandbox's
+// app-pointer map; the entry is removed on destruction (or via unregister()).
+template<typename T, typename T_Sbx>
+class app_pointer
+{
+ KEEP_CLASSES_FRIENDLY
+
+private:
+ // Registry mapping sandbox-representable indices to app pointers.
+ app_pointer_map<typename T_Sbx::T_PointerType>* map;
+ // Sandbox-side handle; 0 means unregistered/empty.
+ typename T_Sbx::T_PointerType idx;
+ // The actual app pointer the handle stands for.
+ T idx_unsandboxed;
+
+ // Transfers state from `other`, leaving it empty.
+ inline void move_obj(app_pointer&& other)
+ {
+ map = other.map;
+ idx = other.idx;
+ idx_unsandboxed = other.idx_unsandboxed;
+ other.map = nullptr;
+ other.idx = 0;
+ other.idx_unsandboxed = nullptr;
+ }
+
+ inline T get_raw_value() const noexcept
+ {
+ return to_tainted().get_raw_value();
+ }
+ inline typename T_Sbx::T_PointerType get_raw_sandbox_value() const noexcept
+ {
+ return idx;
+ }
+
+ // Private: only RLBox internals create registered app_pointers.
+ app_pointer(app_pointer_map<typename T_Sbx::T_PointerType>* a_map,
+ typename T_Sbx::T_PointerType a_idx,
+ T a_idx_unsandboxed)
+ : map(a_map)
+ , idx(a_idx)
+ , idx_unsandboxed(a_idx_unsandboxed)
+ {}
+
+public:
+ // Default-constructed handles are empty (is_unregistered() == true).
+ app_pointer()
+ : map(nullptr)
+ , idx(0)
+ , idx_unsandboxed(0)
+ {}
+
+ ~app_pointer() { unregister(); }
+
+ app_pointer(app_pointer&& other)
+ {
+ move_obj(std::forward<app_pointer>(other));
+ }
+
+ inline app_pointer& operator=(app_pointer&& other)
+ {
+ if (this != &other) {
+ move_obj(std::forward<app_pointer>(other));
+ }
+ return *this;
+ }
+
+ // Removes the entry from the map; safe to call on an empty handle.
+ void unregister()
+ {
+ if (idx != 0) {
+ map->remove_app_ptr(idx);
+ map = nullptr;
+ idx = 0;
+ idx_unsandboxed = nullptr;
+ }
+ }
+
+ // Wraps the stored app pointer as a tainted value.
+ tainted<T, T_Sbx> to_tainted()
+ {
+ return tainted<T, T_Sbx>::internal_factory(
+ reinterpret_cast<T>(idx_unsandboxed));
+ }
+
+ /**
+ * @brief Check if app pointer is _not_ registered.
+ */
+ inline bool is_unregistered() const noexcept { return idx == 0; }
+
+ /**
+ * @brief Unwrap app_pointer without verification. This is an unsafe operation
+ * and should be used with care.
+ */
+ inline auto UNSAFE_unverified() const noexcept { return get_raw_value(); }
+ /**
+ * @brief Like UNSAFE_unverified, but get the underlying sandbox
+ * representation.
+ *
+ * @param sandbox Reference to sandbox.
+ */
+ inline auto UNSAFE_sandboxed(rlbox_sandbox<T_Sbx>& sandbox) const noexcept
+ {
+ RLBOX_UNUSED(sandbox);
+ return get_raw_sandbox_value();
+ }
+};
+
+/**
+ * @brief Tainted boolean value that serves as a "hint" and not a definite
+ * answer. Comparisons with a tainted_volatile return such hints. They are
+ * not `tainted<bool>` values because a compromised sandbox can modify
+ * tainted_volatile data at any time.
+ */
+class tainted_boolean_hint
+{
+private:
+ bool val;
+
+public:
+ // Implicit on purpose: comparison operators construct hints directly.
+ tainted_boolean_hint(bool init)
+ : val(init)
+ {}
+ tainted_boolean_hint(const tainted_boolean_hint&) = default;
+ inline tainted_boolean_hint& operator=(bool rhs)
+ {
+ val = rhs;
+ return *this;
+ }
+ inline tainted_boolean_hint operator!() const { return tainted_boolean_hint(!val); }
+ // Unwraps the hint; `reason` documents why this is safe at the call site.
+ template<size_t N>
+ inline bool unverified_safe_because(const char (&reason)[N]) const
+ {
+ (void)reason; /* unused */
+ return val;
+ }
+ inline bool UNSAFE_unverified() const { return val; }
+ inline auto INTERNAL_unverified_safe() const { return UNSAFE_unverified(); }
+
+ // Add a template parameter to make sure the assert only fires when called
+ template<typename T = void>
+ inline bool copy_and_verify(...) const
+ {
+ rlbox_detail_static_fail_because(
+ detail::true_v<T>,
+ "You can't call copy_and_verify on this value, as this is a result of a "
+ "comparison with memory accessible by the sandbox. \n"
+ "The sandbox could unexpectedly change the value leading to "
+ "time-of-check-time-of-use attacks. \n"
+ "You can avoid this by making a local copy of the data."
+ "For example, if your original code, looked like \n"
+ "if ((tainted_ptr->member == 5).copy_and_verify(...)) { ... } \n\n"
+ "Change this to \n\n"
+ "tainted<int> val = tainted_ptr->member\n"
+ "if ((val == 5).copy_and_verify(...)) { ... } \n\n"
+ "tainted<int, T_Sbx> foo(rlbox_sandbox<T_Sbx>& sandbox) {...} \n\n"
+ "Alternately, if you are sure your code is safe you can use the "
+ "unverified_safe_because API to remove tainting\n");
+
+ // this is never executed, but we need it for the function to type-check
+ return false;
+ }
+};
+
+/**
+ * @brief Tainted integer value that serves as a "hint" and not a definite
+ * answer. Comparisons with a tainted_volatile return such hints. They are
+ * not `tainted<int>` values because a compromised sandbox can modify
+ * tainted_volatile data at any time.
+ */
+class tainted_int_hint
+{
+private:
+ int val;
+
+public:
+ // Implicit on purpose: comparison operators construct hints directly.
+ tainted_int_hint(int init)
+ : val(init)
+ {}
+ tainted_int_hint(const tainted_int_hint&) = default;
+ inline tainted_int_hint& operator=(int rhs)
+ {
+ val = rhs;
+ return *this;
+ }
+ // Logical negation yields a boolean hint (zero/non-zero test).
+ inline tainted_boolean_hint operator!() const { return tainted_boolean_hint(!val); }
+ // Unwraps the hint; `reason` documents why this is safe at the call site.
+ template<size_t N>
+ inline int unverified_safe_because(const char (&reason)[N]) const
+ {
+ (void)reason; /* unused */
+ return val;
+ }
+ inline int UNSAFE_unverified() const { return val; }
+ inline auto INTERNAL_unverified_safe() const { return UNSAFE_unverified(); }
+
+ // Add a template parameter to make sure the assert only fires when called
+ template<typename T = void>
+ inline int copy_and_verify(...) const
+ {
+ rlbox_detail_static_fail_because(
+ detail::true_v<T>,
+ "You can't call copy_and_verify on this value, as this is a result of a "
+ "comparison with memory accessible by the sandbox. \n"
+ "The sandbox could unexpectedly change the value leading to "
+ "time-of-check-time-of-use attacks. \n"
+ "You can avoid this by making a local copy of the data."
+ "For example, if your original code, looked like \n"
+ "if ((tainted_ptr->member == 5).copy_and_verify(...)) { ... } \n\n"
+ "Change this to \n\n"
+ "tainted<int> val = tainted_ptr->member\n"
+ "if ((val == 5).copy_and_verify(...)) { ... } \n\n"
+ "tainted<int, T_Sbx> foo(rlbox_sandbox<T_Sbx>& sandbox) {...} \n\n"
+ "Alternately, if you are sure your code is safe you can use the "
+ "unverified_safe_because API to remove tainting\n");
+
+ // this is never executed, but we need it for the function to type-check
+ return 0;
+ }
+};
+
+}
diff --git a/third_party/rlbox/include/rlbox_range.hpp b/third_party/rlbox/include/rlbox_range.hpp
new file mode 100644
index 0000000000..3dafcbd024
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_range.hpp
@@ -0,0 +1,32 @@
+
+#pragma once
+// IWYU pragma: private, include "rlbox.hpp"
+// IWYU pragma: friend "rlbox_.*\.hpp"
+
+#include <cstdint>
+
+#include "rlbox_types.hpp"
+
+namespace rlbox::detail {
+
+// Checks that a given range is either entirely in a sandbox or entirely
+// outside
+template<typename T_Sbx>
+inline void check_range_doesnt_cross_app_sbx_boundary(const void* ptr,
+ size_t size)
+{
+ // Reject null pointers before doing any arithmetic.
+ auto ptr_start_val = reinterpret_cast<uintptr_t>(ptr);
+ detail::dynamic_check(
+ ptr_start_val,
+ "Performing memory operation memset/memcpy on a null pointer");
+ // Address of the last byte in the range.
+ // NOTE(review): assumes size > 0; size == 0 would compute start - 1 —
+ // confirm callers never pass an empty range.
+ auto ptr_end_val = ptr_start_val + size - 1;
+
+ auto ptr_start = reinterpret_cast<void*>(ptr_start_val);
+ auto ptr_end = reinterpret_cast<void*>(ptr_end_val);
+
+ // First and last byte must be on the same side of the app/sandbox boundary.
+ detail::dynamic_check(
+ rlbox_sandbox<T_Sbx>::is_in_same_sandbox(ptr_start, ptr_end),
+ "range has overflowed sandbox bounds");
+}
+
+} \ No newline at end of file
diff --git a/third_party/rlbox/include/rlbox_sandbox.hpp b/third_party/rlbox/include/rlbox_sandbox.hpp
new file mode 100644
index 0000000000..b6cebeb3d0
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_sandbox.hpp
@@ -0,0 +1,1094 @@
+#pragma once
+// IWYU pragma: private, include "rlbox.hpp"
+// IWYU pragma: friend "rlbox_.*\.hpp"
+
+#include <algorithm>
+#include <atomic>
+#ifdef RLBOX_MEASURE_TRANSITION_TIMES
+# include <chrono>
+#endif
+#include <cstdlib>
+#include <limits>
+#include <map>
+#include <mutex>
+#ifndef RLBOX_USE_CUSTOM_SHARED_LOCK
+# include <shared_mutex>
+#endif
+#ifdef RLBOX_MEASURE_TRANSITION_TIMES
+# include <sstream>
+# include <string>
+#endif
+#include <stdint.h>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "rlbox_conversion.hpp"
+#include "rlbox_helpers.hpp"
+#include "rlbox_stdlib_polyfill.hpp"
+#include "rlbox_struct_support.hpp"
+#include "rlbox_type_traits.hpp"
+#include "rlbox_wrapper_traits.hpp"
+
+#ifdef RLBOX_MEASURE_TRANSITION_TIMES
+using namespace std::chrono;
+#endif
+
+namespace rlbox {
+
+namespace convert_fn_ptr_to_sandbox_equivalent_detail {
+ template<typename T, typename T_Sbx>
+ using conv = ::rlbox::detail::convert_to_sandbox_equivalent_t<T, T_Sbx>;
+
+ template<typename T_Ret, typename... T_Args>
+ using T_Func = T_Ret (*)(T_Args...);
+
+ template<typename T_Sbx, typename T_Ret, typename... T_Args>
+ T_Func<conv<T_Ret, T_Sbx>, conv<T_Args, T_Sbx>...> helper(
+ T_Ret (*)(T_Args...));
+}
+
+#if defined(RLBOX_MEASURE_TRANSITION_TIMES) || \
+ defined(RLBOX_TRANSITION_ACTION_OUT) || defined(RLBOX_TRANSITION_ACTION_IN)
+enum class rlbox_transition
+{
+ INVOKE,
+ CALLBACK
+};
+#endif
+#ifdef RLBOX_MEASURE_TRANSITION_TIMES
+struct rlbox_transition_timing
+{
+ rlbox_transition invoke;
+ const char* name;
+ void* ptr;
+ int64_t time;
+
+ std::string to_string()
+ {
+ std::ostringstream ret;
+ if (invoke == rlbox_transition::INVOKE) {
+ ret << name;
+ } else {
+ ret << "Callback " << ptr;
+ }
+ ret << " : " << time << "\n";
+
+ return ret.str();
+ }
+};
+#endif
+
+#ifndef RLBOX_SINGLE_THREADED_INVOCATIONS
+# error \
+ "RLBox does not yet support threading. Please define RLBOX_SINGLE_THREADED_INVOCATIONS prior to including RLBox and ensure you are only using it from a single thread. If threading is required, please file a bug."
+#endif
+
+/**
+ * @brief Encapsulation for sandboxes.
+ *
+ * @tparam T_Sbx Type of sandbox. For the null sandbox this is
+ * `rlbox_noop_sandbox`
+ */
+template<typename T_Sbx>
+class rlbox_sandbox : protected T_Sbx
+{
+ KEEP_CLASSES_FRIENDLY
+
+private:
+#ifdef RLBOX_MEASURE_TRANSITION_TIMES
+ std::vector<rlbox_transition_timing> transition_times;
+#endif
+
+ static inline RLBOX_SHARED_LOCK(sandbox_list_lock);
+ // The actual type of the vector is std::vector<rlbox_sandbox<T_Sbx>*>
+ // However clang 5, 6 have bugs where compilation seg-faults on this type
+ // So we just use this std::vector<void*>
+ static inline std::vector<void*> sandbox_list;
+
+ RLBOX_SHARED_LOCK(func_ptr_cache_lock);
+ std::map<std::string, void*> func_ptr_map;
+
+ app_pointer_map<typename T_Sbx::T_PointerType> app_ptr_map;
+
+  // This variable tracks whether the sandbox has already been created/destroyed.
+ // APIs in this class should be called only when the sandbox is created.
+ // However, it is expensive to check in APIs such as invoke or in the callback
+ // interceptor. What's more, there could be time of check time of use issues
+ // in the checks as well.
+ // In general, we leave it up to the user to ensure these APIs are never
+ // called prior to sandbox construction or after destruction. We perform some
+ // conservative sanity checks, where they would not add too much overhead.
+ enum class Sandbox_Status
+ {
+ NOT_CREATED,
+ INITIALIZING,
+ CREATED,
+ CLEANING_UP
+ };
+ std::atomic<Sandbox_Status> sandbox_created = Sandbox_Status::NOT_CREATED;
+
+ std::mutex callback_lock;
+ std::vector<void*> callback_keys;
+
+ void* transition_state = nullptr;
+
+ template<typename T>
+ using convert_fn_ptr_to_sandbox_equivalent_t =
+ decltype(::rlbox::convert_fn_ptr_to_sandbox_equivalent_detail::helper<
+ T_Sbx>(std::declval<T>()));
+
+ template<typename T>
+ inline constexpr void check_invoke_param_type_is_ok()
+ {
+ using T_NoRef = std::remove_reference_t<T>;
+
+ if_constexpr_named(cond1, detail::rlbox_is_wrapper_v<T_NoRef>)
+ {
+ if_constexpr_named(
+ subcond1,
+ !std::is_same_v<T_Sbx, detail::rlbox_get_wrapper_sandbox_t<T_NoRef>>)
+ {
+ rlbox_detail_static_fail_because(
+ cond1 && subcond1,
+ "Mixing tainted data from a different sandbox types. This could "
+ "happen due to couple of different reasons.\n"
+ "1. You are using 2 sandbox types for example'rlbox_noop_sandbox' "
+ "and 'rlbox_lucet_sandbox', and are passing tainted data from one "
+ "sandbox as parameters into a function call to the other sandbox. "
+ "This is not allowed, unwrap the tainted data with copy_and_verify "
+ "or other unwrapping APIs first.\n"
+ "2. You have inadvertantly forgotten to set/remove "
+ "RLBOX_USE_STATIC_CALLS depending on the sandbox type. Some sandbox "
+ "types like rlbox_noop_sandbox require this to be set to a given "
+ "value, while other types like rlbox_lucet_sandbox, require this not "
+ "to be set.");
+ }
+ }
+ else if_constexpr_named(cond2,
+ std::is_null_pointer_v<T_NoRef> ||
+ detail::is_fundamental_or_enum_v<T_NoRef>)
+ {}
+ else
+ {
+ constexpr auto unknownCase = !(cond1 || cond2);
+ rlbox_detail_static_fail_because(
+ unknownCase,
+ "Arguments to a sandbox function call should be primitives or wrapped "
+ "types like tainted, callbacks etc.");
+ }
+ }
+
+ template<typename T>
+ inline auto invoke_process_param(T&& param)
+ {
+ check_invoke_param_type_is_ok<T>();
+
+ using T_NoRef = std::remove_reference_t<T>;
+
+ if constexpr (detail::rlbox_is_tainted_opaque_v<T_NoRef>) {
+ auto ret = from_opaque(param);
+ return ret.UNSAFE_sandboxed(*this);
+ } else if constexpr (detail::rlbox_is_wrapper_v<T_NoRef>) {
+ return param.UNSAFE_sandboxed(*this);
+ } else if constexpr (std::is_null_pointer_v<T_NoRef>) {
+ tainted<void*, T_Sbx> ret = nullptr;
+ return ret.UNSAFE_sandboxed(*this);
+ } else if constexpr (detail::is_fundamental_or_enum_v<T_NoRef>) {
+ // For unwrapped primitives, assign to a tainted var and then unwrap so
+ // that we adjust for machine model
+ tainted<T_NoRef, T_Sbx> ret = param;
+ return ret.UNSAFE_sandboxed(*this);
+ } else {
+ rlbox_detail_static_fail_because(
+ detail::true_v<T_NoRef>,
+ "Only tainted types, callbacks or primitive values such as ints can be "
+ "passed as parameters.\n"
+ "To make a parameter tainted, try moving the allocation into the "
+ "sandbox.\n"
+ "If the parameter is a callback, try registering the callback via the "
+ "register_callback API.");
+ }
+ }
+
+ template<typename T, typename T_Arg>
+ inline tainted<T, T_Sbx> sandbox_callback_intercept_convert_param(
+ rlbox_sandbox<T_Sbx>& sandbox,
+ const T_Arg& arg)
+ {
+ tainted<T, T_Sbx> ret;
+ using namespace detail;
+ convert_type<T_Sbx,
+ adjust_type_direction::TO_APPLICATION,
+ adjust_type_context::SANDBOX>(
+ ret.get_raw_value_ref(),
+ arg,
+ nullptr /* example_unsandboxed_ptr */,
+ &sandbox);
+ return ret;
+ }
+
+ template<typename T_Ret, typename... T_Args>
+ static detail::convert_to_sandbox_equivalent_t<T_Ret, T_Sbx>
+ sandbox_callback_interceptor(
+ detail::convert_to_sandbox_equivalent_t<T_Args, T_Sbx>... args)
+ {
+ std::pair<T_Sbx*, void*> context =
+ T_Sbx::impl_get_executed_callback_sandbox_and_key();
+ auto& sandbox = *(reinterpret_cast<rlbox_sandbox<T_Sbx>*>(context.first));
+ auto key = context.second;
+
+ using T_Func_Ret =
+ std::conditional_t<std::is_void_v<T_Ret>, void, tainted<T_Ret, T_Sbx>>;
+ using T_Func =
+ T_Func_Ret (*)(rlbox_sandbox<T_Sbx>&, tainted<T_Args, T_Sbx>...);
+ auto target_fn_ptr = reinterpret_cast<T_Func>(key);
+
+#ifdef RLBOX_MEASURE_TRANSITION_TIMES
+ high_resolution_clock::time_point enter_time = high_resolution_clock::now();
+ auto on_exit = rlbox::detail::make_scope_exit([&] {
+ auto exit_time = high_resolution_clock::now();
+ int64_t ns = duration_cast<nanoseconds>(exit_time - enter_time).count();
+ sandbox.transition_times.push_back(
+ rlbox_transition_timing{ rlbox_transition::CALLBACK,
+ nullptr /* func_name */,
+ key /* func_ptr */,
+ ns });
+ });
+#endif
+#ifdef RLBOX_TRANSITION_ACTION_OUT
+ RLBOX_TRANSITION_ACTION_OUT(rlbox_transition::CALLBACK,
+ nullptr /* func_name */,
+ key /* func_ptr */,
+ sandbox.transition_state);
+#endif
+#ifdef RLBOX_TRANSITION_ACTION_IN
+ auto on_exit_transition = rlbox::detail::make_scope_exit([&] {
+ RLBOX_TRANSITION_ACTION_IN(rlbox_transition::CALLBACK,
+ nullptr /* func_name */,
+ key /* func_ptr */,
+ sandbox.transition_state);
+ });
+#endif
+ if constexpr (std::is_void_v<T_Func_Ret>) {
+ (*target_fn_ptr)(
+ sandbox,
+ sandbox.template sandbox_callback_intercept_convert_param<T_Args>(
+ sandbox, args)...);
+ return;
+ } else {
+ auto tainted_ret = (*target_fn_ptr)(
+ sandbox,
+ sandbox.template sandbox_callback_intercept_convert_param<T_Args>(
+ sandbox, args)...);
+
+ using namespace detail;
+ convert_to_sandbox_equivalent_t<T_Ret, T_Sbx> ret;
+ convert_type<T_Sbx,
+ adjust_type_direction::TO_SANDBOX,
+ adjust_type_context::SANDBOX>(
+ ret,
+ tainted_ret.get_raw_value_ref(),
+ nullptr /* example_unsandboxed_ptr */,
+ &sandbox);
+ return ret;
+ }
+ }
+
+ /**
+ * @brief Unregister a callback function and disallow the sandbox from
+ * calling this function henceforth.
+ */
+ template<typename T_Ret, typename... T_Args>
+ inline void unregister_callback(void* key)
+ {
+ // Silently swallowing the failure is better here as RAII types may try to
+ // cleanup callbacks after sandbox destruction
+ if (sandbox_created.load() != Sandbox_Status::CREATED) {
+ return;
+ }
+
+ this->template impl_unregister_callback<
+ detail::convert_to_sandbox_equivalent_t<T_Ret, T_Sbx>,
+ detail::convert_to_sandbox_equivalent_t<T_Args, T_Sbx>...>(key);
+
+ std::lock_guard<std::mutex> lock(callback_lock);
+ auto el_ref = std::find(callback_keys.begin(), callback_keys.end(), key);
+ detail::dynamic_check(
+ el_ref != callback_keys.end(),
+ "Unexpected state. Unregistering a callback that was never registered.");
+ callback_keys.erase(el_ref);
+ }
+
+ static T_Sbx* find_sandbox_from_example(const void* example_sandbox_ptr)
+ {
+ detail::dynamic_check(
+ example_sandbox_ptr != nullptr,
+ "Internal error: received a null example pointer. Please file a bug.");
+
+ RLBOX_ACQUIRE_SHARED_GUARD(lock, sandbox_list_lock);
+ for (auto sandbox_v : sandbox_list) {
+ auto sandbox = reinterpret_cast<rlbox_sandbox<T_Sbx>*>(sandbox_v);
+ if (sandbox->is_pointer_in_sandbox_memory(example_sandbox_ptr)) {
+ return sandbox;
+ }
+ }
+
+ return nullptr;
+ }
+
+ template<typename... T_Args>
+ static auto impl_create_sandbox_helper(rlbox_sandbox<T_Sbx>* this_ptr,
+ T_Args... args)
+ {
+ return this_ptr->impl_create_sandbox(std::forward<T_Args>(args)...);
+ }
+
+public:
+ /**
+ * @brief Unused member that allows the calling code to save data in a
+ * "per-sandbox" storage. This can be useful to save context which is used
+ * in callbacks.
+ */
+ void* sandbox_storage;
+
+ /***** Function to adjust for custom machine models *****/
+
+ template<typename T>
+ using convert_to_sandbox_equivalent_nonclass_t =
+ detail::convert_base_types_t<T,
+ typename T_Sbx::T_ShortType,
+ typename T_Sbx::T_IntType,
+ typename T_Sbx::T_LongType,
+ typename T_Sbx::T_LongLongType,
+ typename T_Sbx::T_PointerType>;
+
+ T_Sbx* get_sandbox_impl() { return this; }
+
+ /**
+ * @brief Create a new sandbox.
+ *
+ * @tparam T_Args Arguments passed to the underlying sandbox
+ * implementation. For the null sandbox, no arguments are necessary.
+ */
+ template<typename... T_Args>
+ inline bool create_sandbox(T_Args... args)
+ {
+#ifdef RLBOX_MEASURE_TRANSITION_TIMES
+ // Warm up the timer. The first call is always slow (at least on the test
+ // platform)
+ for (int i = 0; i < 10; i++) {
+ auto val = high_resolution_clock::now();
+ RLBOX_UNUSED(val);
+ }
+#endif
+ auto expected = Sandbox_Status::NOT_CREATED;
+ bool success = sandbox_created.compare_exchange_strong(
+ expected, Sandbox_Status::INITIALIZING /* desired */);
+ detail::dynamic_check(
+ success,
+ "create_sandbox called when sandbox already created/is being "
+ "created concurrently");
+
+ using T_Result = rlbox::detail::polyfill::invoke_result_t<
+ decltype(impl_create_sandbox_helper<T_Args...>),
+ decltype(this),
+ T_Args...>;
+
+ bool created = true;
+ if constexpr (std::is_same_v<T_Result, void>) {
+ this->impl_create_sandbox(std::forward<T_Args>(args)...);
+ } else if constexpr (std::is_same_v<T_Result, bool>) {
+ created = this->impl_create_sandbox(std::forward<T_Args>(args)...);
+ } else {
+ rlbox_detail_static_fail_because(
+ (!std::is_same_v<T_Result, void> && !std::is_same_v<T_Result, bool>),
+ "Expected impl_create_sandbox to return void or a boolean");
+ }
+
+ if (created) {
+ sandbox_created.store(Sandbox_Status::CREATED);
+ RLBOX_ACQUIRE_UNIQUE_GUARD(lock, sandbox_list_lock);
+ sandbox_list.push_back(this);
+ }
+
+ return created;
+ }
+
+ /**
+ * @brief Destroy sandbox and reclaim any memory.
+ */
+ inline auto destroy_sandbox()
+ {
+ auto expected = Sandbox_Status::CREATED;
+ bool success = sandbox_created.compare_exchange_strong(
+ expected, Sandbox_Status::CLEANING_UP /* desired */);
+
+ detail::dynamic_check(
+ success,
+ "destroy_sandbox called without sandbox creation/is being "
+ "destroyed concurrently");
+
+ {
+ RLBOX_ACQUIRE_UNIQUE_GUARD(lock, sandbox_list_lock);
+ auto el_ref = std::find(sandbox_list.begin(), sandbox_list.end(), this);
+ detail::dynamic_check(
+ el_ref != sandbox_list.end(),
+ "Unexpected state. Destroying a sandbox that was never initialized.");
+ sandbox_list.erase(el_ref);
+ }
+
+ sandbox_created.store(Sandbox_Status::NOT_CREATED);
+ return this->impl_destroy_sandbox();
+ }
+
+ template<typename T>
+ inline T get_unsandboxed_pointer(
+ convert_to_sandbox_equivalent_nonclass_t<T> p) const
+ {
+ static_assert(std::is_pointer_v<T>);
+ if (p == 0) {
+ return nullptr;
+ }
+ auto ret = this->template impl_get_unsandboxed_pointer<T>(p);
+ return reinterpret_cast<T>(ret);
+ }
+
+ template<typename T>
+ inline convert_to_sandbox_equivalent_nonclass_t<T> get_sandboxed_pointer(
+ const void* p) const
+ {
+ static_assert(std::is_pointer_v<T>);
+ if (p == nullptr) {
+ return 0;
+ }
+ return this->template impl_get_sandboxed_pointer<T>(p);
+ }
+
+ template<typename T>
+ static inline T get_unsandboxed_pointer_no_ctx(
+ convert_to_sandbox_equivalent_nonclass_t<T> p,
+ const void* example_unsandboxed_ptr)
+ {
+ static_assert(std::is_pointer_v<T>);
+ if (p == 0) {
+ return nullptr;
+ }
+ auto ret = T_Sbx::template impl_get_unsandboxed_pointer_no_ctx<T>(
+ p, example_unsandboxed_ptr, find_sandbox_from_example);
+ return reinterpret_cast<T>(ret);
+ }
+
+ template<typename T>
+ static inline convert_to_sandbox_equivalent_nonclass_t<T>
+ get_sandboxed_pointer_no_ctx(const void* p,
+ const void* example_unsandboxed_ptr)
+ {
+ static_assert(std::is_pointer_v<T>);
+ if (p == nullptr) {
+ return 0;
+ }
+ return T_Sbx::template impl_get_sandboxed_pointer_no_ctx<T>(
+ p, example_unsandboxed_ptr, find_sandbox_from_example);
+ }
+
+ /**
+ * @brief Allocate a new pointer that is accessible to both the application
+ * and sandbox. The pointer is allocated in sandbox memory.
+ *
+ * @tparam T The type of the pointer you want to create. If T=int, this
+ * would return a pointer to an int.
+ *
+ * @return tainted<T*, T_Sbx> Tainted pointer accessible to the application
+ * and sandbox.
+ */
+ template<typename T>
+ inline tainted<T*, T_Sbx> malloc_in_sandbox()
+ {
+ const uint32_t defaultCount = 1;
+ return malloc_in_sandbox<T>(defaultCount);
+ }
+
+ /**
+ * @brief Allocate an array that is accessible to both the application
+ * and sandbox. The pointer is allocated in sandbox memory.
+ *
+ * @tparam T The type of the array elements you want to create. If T=int, this
+ * would return a pointer to an array of ints.
+ *
+ * @param count The number of array elements to allocate.
+ *
+ * @return tainted<T*, T_Sbx> Tainted pointer accessible to the application
+ * and sandbox.
+ */
+ template<typename T>
+ inline tainted<T*, T_Sbx> malloc_in_sandbox(uint32_t count)
+ {
+ // Silently swallowing the failure is better here as RAII types may try to
+ // malloc after sandbox destruction
+ if (sandbox_created.load() != Sandbox_Status::CREATED) {
+ return tainted<T*, T_Sbx>::internal_factory(nullptr);
+ }
+
+ detail::dynamic_check(count != 0, "Malloc tried to allocate 0 bytes");
+ if constexpr (sizeof(T) >= std::numeric_limits<uint32_t>::max()) {
+ rlbox_detail_static_fail_because(sizeof(T) >=
+ std::numeric_limits<uint32_t>::max(),
+ "Tried to allocate an object over 4GB.");
+ }
+ auto total_size = static_cast<uint64_t>(sizeof(T)) * count;
+ if constexpr (sizeof(size_t) == 4) {
+ // On a 32-bit platform, we need to make sure that total_size is not >=4GB
+ detail::dynamic_check(total_size < std::numeric_limits<uint32_t>::max(),
+ "Tried to allocate memory over 4GB");
+ } else if constexpr (sizeof(size_t) != 8) {
+ // Double check we are on a 64-bit platform
+ // Note for static checks we need to have some dependence on T, so adding
+ // a dummy
+ constexpr bool dummy = sizeof(T) >= 0;
+ rlbox_detail_static_fail_because(dummy && sizeof(size_t) != 8,
+ "Expected 32 or 64 bit platform.");
+ }
+ auto ptr_in_sandbox = this->impl_malloc_in_sandbox(total_size);
+ auto ptr = get_unsandboxed_pointer<T*>(ptr_in_sandbox);
+ if (!ptr) {
+ return tainted<T*, T_Sbx>(nullptr);
+ }
+ detail::dynamic_check(is_pointer_in_sandbox_memory(ptr),
+ "Malloc returned pointer outside the sandbox memory");
+ auto ptr_end = reinterpret_cast<uintptr_t>(ptr + (count - 1));
+ detail::dynamic_check(
+ is_in_same_sandbox(ptr, reinterpret_cast<void*>(ptr_end)),
+ "Malloc returned a pointer whose range goes beyond sandbox memory");
+ auto cast_ptr = reinterpret_cast<T*>(ptr);
+ return tainted<T*, T_Sbx>::internal_factory(cast_ptr);
+ }
+
+ /**
+ * @brief Free the memory referenced by the tainted pointer.
+ *
+ * @param ptr Pointer to sandbox memory to free.
+ */
+ template<typename T>
+ inline void free_in_sandbox(tainted<T*, T_Sbx> ptr)
+ {
+ // Silently swallowing the failure is better here as RAII types may try to
+ // free after sandbox destruction
+ if (sandbox_created.load() != Sandbox_Status::CREATED) {
+ return;
+ }
+
+ this->impl_free_in_sandbox(ptr.get_raw_sandbox_value(*this));
+ }
+
+ /**
+ * @brief Free the memory referenced by a tainted_volatile pointer ref.
+ *
+ * @param ptr_ref Pointer reference to sandbox memory to free.
+ */
+ template<typename T>
+ inline void free_in_sandbox(tainted_volatile<T, T_Sbx>& ptr_ref)
+ {
+ tainted<T, T_Sbx> ptr = ptr_ref;
+ free_in_sandbox(ptr);
+ }
+
+ /**
+ * @brief Free the memory referenced by a tainted_opaque pointer.
+ *
+ * @param ptr_opaque Opaque pointer to sandbox memory to free.
+ */
+ template<typename T>
+ inline void free_in_sandbox(tainted_opaque<T, T_Sbx> ptr_opaque)
+ {
+ tainted<T, T_Sbx> ptr = from_opaque(ptr_opaque);
+ free_in_sandbox(ptr);
+ }
+
+ /**
+ * @brief Check if two pointers are in the same sandbox.
+ * For the null-sandbox, this always returns true.
+ */
+ static inline bool is_in_same_sandbox(const void* p1, const void* p2)
+ {
+ const size_t num_args =
+ detail::func_arg_nums_v<decltype(T_Sbx::impl_is_in_same_sandbox)>;
+ if constexpr (num_args == 2) {
+ return T_Sbx::impl_is_in_same_sandbox(p1, p2);
+ } else {
+ return T_Sbx::impl_is_in_same_sandbox(p1, p2, find_sandbox_from_example);
+ }
+ }
+
+ /**
+ * @brief Check if the pointer points to this sandbox's memory.
+ * For the null-sandbox, this always returns true.
+ */
+ inline bool is_pointer_in_sandbox_memory(const void* p)
+ {
+ return this->impl_is_pointer_in_sandbox_memory(p);
+ }
+
+ /**
+ * @brief Check if the pointer points to application memory.
+ * For the null-sandbox, this always returns true.
+ */
+ inline bool is_pointer_in_app_memory(const void* p)
+ {
+ return this->impl_is_pointer_in_app_memory(p);
+ }
+
+ inline size_t get_total_memory() { return this->impl_get_total_memory(); }
+
+ inline void* get_memory_location()
+ {
+ return this->impl_get_memory_location();
+ }
+
+ void* get_transition_state() { return transition_state; }
+
+ void set_transition_state(void* new_state) { transition_state = new_state; }
+
+ /**
+ * @brief For internal use only.
+ * Grant access of the passed in buffer in to the sandbox instance. Called by
+ * internal APIs only if the underlying sandbox supports
+ * can_grant_deny_access by including the line
+ * ```
+ * using can_grant_deny_access = void;
+ * ```
+ */
+ template<typename T>
+ inline tainted<T*, T_Sbx> INTERNAL_grant_access(T* src,
+ size_t num,
+ bool& success)
+ {
+ auto ret = this->impl_grant_access(src, num, success);
+ return tainted<T*, T_Sbx>::internal_factory(ret);
+ }
+
+ /**
+ * @brief For internal use only.
+ * Grant access of the passed in buffer in to the sandbox instance. Called by
+ * internal APIs only if the underlying sandbox supports
+ * can_grant_deny_access by including the line
+ * ```
+ * using can_grant_deny_access = void;
+ * ```
+ */
+ template<typename T>
+ inline T* INTERNAL_deny_access(tainted<T*, T_Sbx> src,
+ size_t num,
+ bool& success)
+ {
+ auto ret =
+ this->impl_deny_access(src.INTERNAL_unverified_safe(), num, success);
+ return ret;
+ }
+
+ void* lookup_symbol(const char* func_name)
+ {
+ {
+ RLBOX_ACQUIRE_SHARED_GUARD(lock, func_ptr_cache_lock);
+
+ auto func_ptr_ref = func_ptr_map.find(func_name);
+ if (func_ptr_ref != func_ptr_map.end()) {
+ return func_ptr_ref->second;
+ }
+ }
+
+ void* func_ptr = this->impl_lookup_symbol(func_name);
+ RLBOX_ACQUIRE_UNIQUE_GUARD(lock, func_ptr_cache_lock);
+ func_ptr_map[func_name] = func_ptr;
+ return func_ptr;
+ }
+
+ void* internal_lookup_symbol(const char* func_name)
+ {
+ {
+ RLBOX_ACQUIRE_SHARED_GUARD(lock, func_ptr_cache_lock);
+
+ auto func_ptr_ref = func_ptr_map.find(func_name);
+ if (func_ptr_ref != func_ptr_map.end()) {
+ return func_ptr_ref->second;
+ }
+ }
+
+ void* func_ptr = 0;
+ if constexpr (rlbox::detail::
+ has_member_using_needs_internal_lookup_symbol_v<T_Sbx>) {
+ func_ptr = this->impl_internal_lookup_symbol(func_name);
+ } else {
+ func_ptr = this->impl_lookup_symbol(func_name);
+ }
+ RLBOX_ACQUIRE_UNIQUE_GUARD(lock, func_ptr_cache_lock);
+ func_ptr_map[func_name] = func_ptr;
+ return func_ptr;
+ }
+
+  // this is an internal function invoked from macros, so it has to be public
+ template<typename T, typename... T_Args>
+ inline auto INTERNAL_invoke_with_func_name(const char* func_name,
+ T_Args&&... params)
+ {
+ return INTERNAL_invoke_with_func_ptr<T, T_Args...>(
+ func_name, lookup_symbol(func_name), std::forward<T_Args>(params)...);
+ }
+
+  // this is an internal function invoked from macros, so it has to be public
+ // Explicitly don't use inline on this, as this adds a lot of instructions
+ // prior to function call. What's more, by not inlining, different function
+ // calls with the same signature can share the same code segments for
+ // sandboxed function execution in the binary
+ template<typename T, typename... T_Args>
+ auto INTERNAL_invoke_with_func_ptr(const char* func_name,
+ void* func_ptr,
+ T_Args&&... params)
+ {
+ // unused in some paths
+ RLBOX_UNUSED(func_name);
+#ifdef RLBOX_MEASURE_TRANSITION_TIMES
+ auto enter_time = high_resolution_clock::now();
+ auto on_exit = rlbox::detail::make_scope_exit([&] {
+ auto exit_time = high_resolution_clock::now();
+ int64_t ns = duration_cast<nanoseconds>(exit_time - enter_time).count();
+ transition_times.push_back(rlbox_transition_timing{
+ rlbox_transition::INVOKE, func_name, func_ptr, ns });
+ });
+#endif
+#ifdef RLBOX_TRANSITION_ACTION_IN
+ RLBOX_TRANSITION_ACTION_IN(
+ rlbox_transition::INVOKE, func_name, func_ptr, transition_state);
+#endif
+#ifdef RLBOX_TRANSITION_ACTION_OUT
+ auto on_exit_transition = rlbox::detail::make_scope_exit([&] {
+ RLBOX_TRANSITION_ACTION_OUT(
+ rlbox_transition::INVOKE, func_name, func_ptr, transition_state);
+ });
+#endif
+ (check_invoke_param_type_is_ok<T_Args>(), ...);
+
+ static_assert(
+ rlbox::detail::polyfill::is_invocable_v<
+ T,
+ detail::rlbox_remove_wrapper_t<std::remove_reference_t<T_Args>>...>,
+ "Mismatched arguments types for function");
+
+ using T_Result = rlbox::detail::polyfill::invoke_result_t<
+ T,
+ detail::rlbox_remove_wrapper_t<std::remove_reference_t<T_Args>>...>;
+
+ using T_Converted =
+ std::remove_pointer_t<convert_fn_ptr_to_sandbox_equivalent_t<T*>>;
+
+ if constexpr (std::is_void_v<T_Result>) {
+ this->template impl_invoke_with_func_ptr<T>(
+ reinterpret_cast<T_Converted*>(func_ptr),
+ invoke_process_param(params)...);
+ return;
+ } else {
+ auto raw_result = this->template impl_invoke_with_func_ptr<T>(
+ reinterpret_cast<T_Converted*>(func_ptr),
+ invoke_process_param(params)...);
+ tainted<T_Result, T_Sbx> wrapped_result;
+ using namespace detail;
+ convert_type<T_Sbx,
+ adjust_type_direction::TO_APPLICATION,
+ adjust_type_context::SANDBOX>(
+ wrapped_result.get_raw_value_ref(),
+ raw_result,
+ nullptr /* example_unsandboxed_ptr */,
+ this /* sandbox_ptr */);
+ return wrapped_result;
+ }
+ }
+
+ // Useful in the porting stage to temporarily allow non tainted pointers to go
+ // through. This will only ever work in the rlbox_noop_sandbox. Any sandbox
+ // that actually enforces isolation will crash here.
+ template<typename T2>
+ tainted<T2, T_Sbx> UNSAFE_accept_pointer(T2 ptr)
+ {
+ static_assert(std::is_pointer_v<T2>,
+ "UNSAFE_accept_pointer expects a pointer param");
+ tainted<T2, T_Sbx> ret;
+ ret.assign_raw_pointer(*this, ptr);
+ return ret;
+ }
+
+ template<typename T_Ret, typename... T_Args>
+ using T_Cb_no_wrap = detail::rlbox_remove_wrapper_t<T_Ret>(
+ detail::rlbox_remove_wrapper_t<T_Args>...);
+
+ template<typename T_Ret>
+ sandbox_callback<T_Cb_no_wrap<T_Ret>*, T_Sbx> register_callback(T_Ret (*)())
+ {
+ rlbox_detail_static_fail_because(
+ detail::true_v<T_Ret>,
+ "Modify the callback to change the first parameter to a sandbox. "
+ "For instance if a callback has type\n\n"
+ "int foo() {...}\n\n"
+ "Change this to \n\n"
+ "tainted<int, T_Sbx> foo(rlbox_sandbox<T_Sbx>& sandbox) {...}\n");
+
+ // this is never executed, but we need it for the function to type-check
+ std::abort();
+ }
+
+ /**
+ * @brief Expose a callback function to the sandboxed code.
+ *
+ * @param func_ptr The callback to expose.
+ *
+ * @tparam T_RL Sandbox reference type (first argument).
+ * @tparam T_Ret Return type of callback. Must be tainted or void.
+ * @tparam T_Args Types of remaining callback arguments. Must be tainted.
+ *
+ * @return Wrapped callback function pointer that can be passed to the
+ * sandbox.
+ */
+ template<typename T_RL, typename T_Ret, typename... T_Args>
+ sandbox_callback<T_Cb_no_wrap<T_Ret, T_Args...>*, T_Sbx> register_callback(
+ T_Ret (*func_ptr)(T_RL, T_Args...))
+ {
+ // Some branches don't use the param
+ RLBOX_UNUSED(func_ptr);
+
+ if_constexpr_named(cond1, !std::is_same_v<T_RL, rlbox_sandbox<T_Sbx>&>)
+ {
+ rlbox_detail_static_fail_because(
+ cond1,
+ "Modify the callback to change the first parameter to a sandbox. "
+ "For instance if a callback has type\n\n"
+ "int foo(int a, int b) {...}\n\n"
+ "Change this to \n\n"
+ "tainted<int, T_Sbx> foo(rlbox_sandbox<T_Sbx>& sandbox, "
+ "tainted<int, T_Sbx> a, tainted<int, T_Sbx> b) {...}\n");
+ }
+ else if_constexpr_named(
+ cond2, !(detail::rlbox_is_tainted_or_opaque_v<T_Args> && ...))
+ {
+ rlbox_detail_static_fail_because(
+ cond2,
+ "Change all arguments to the callback have to be tainted or "
+ "tainted_opaque. "
+ "For instance if a callback has type\n\n"
+ "int foo(int a, int b) {...}\n\n"
+ "Change this to \n\n"
+ "tainted<int, T_Sbx> foo(rlbox_sandbox<T_Sbx>& sandbox, "
+ "tainted<int, T_Sbx> a, tainted<int, T_Sbx> b) {...}\n");
+ }
+ else if_constexpr_named(
+ cond3, (std::is_array_v<detail::rlbox_remove_wrapper_t<T_Args>> || ...))
+ {
+ rlbox_detail_static_fail_because(
+ cond3,
+ "Change all static array arguments to the callback to be pointers. "
+ "For instance if a callback has type\n\n"
+ "int foo(int a[4]) {...}\n\n"
+ "Change this to \n\n"
+ "tainted<int, T_Sbx> foo(rlbox_sandbox<T_Sbx>& sandbox, "
+ "tainted<int*, T_Sbx> a) {...}\n");
+ }
+ else if_constexpr_named(
+ cond4,
+ !(std::is_void_v<T_Ret> || detail::rlbox_is_tainted_or_opaque_v<T_Ret>))
+ {
+ rlbox_detail_static_fail_because(
+ cond4,
+ "Change the callback return type to be tainted or tainted_opaque if it "
+ "is not void. "
+ "For instance if a callback has type\n\n"
+ "int foo(int a, int b) {...}\n\n"
+ "Change this to \n\n"
+ "tainted<int, T_Sbx> foo(rlbox_sandbox<T_Sbx>& sandbox, "
+ "tainted<int, T_Sbx> a, tainted<int, T_Sbx> b) {...}\n");
+ }
+ else
+ {
+ detail::dynamic_check(
+ sandbox_created.load() == Sandbox_Status::CREATED,
+ "register_callback called without sandbox creation");
+
+ // Need unique key for each callback we register - just use the func addr
+ void* unique_key = reinterpret_cast<void*>(func_ptr);
+
+ // Make sure that the user hasn't previously registered this function...
+    // If they have, we would be returning 2 owning types (sandbox_callback) to
+ // the same callback which would be bad
+ {
+ std::lock_guard<std::mutex> lock(callback_lock);
+ bool exists =
+ std::find(callback_keys.begin(), callback_keys.end(), unique_key) !=
+ callback_keys.end();
+ detail::dynamic_check(
+ !exists, "You have previously already registered this callback.");
+ callback_keys.push_back(unique_key);
+ }
+
+ auto callback_interceptor =
+ sandbox_callback_interceptor<detail::rlbox_remove_wrapper_t<T_Ret>,
+ detail::rlbox_remove_wrapper_t<T_Args>...>;
+
+ auto callback_trampoline = this->template impl_register_callback<
+ detail::convert_to_sandbox_equivalent_t<
+ detail::rlbox_remove_wrapper_t<T_Ret>,
+ T_Sbx>,
+ detail::convert_to_sandbox_equivalent_t<
+ detail::rlbox_remove_wrapper_t<T_Args>,
+ T_Sbx>...>(unique_key, reinterpret_cast<void*>(callback_interceptor));
+
+ auto tainted_func_ptr = reinterpret_cast<
+ detail::rlbox_tainted_opaque_to_tainted_t<T_Ret, T_Sbx> (*)(
+ T_RL, detail::rlbox_tainted_opaque_to_tainted_t<T_Args, T_Sbx>...)>(
+ reinterpret_cast<void*>(func_ptr));
+
+ auto ret = sandbox_callback<T_Cb_no_wrap<T_Ret, T_Args...>*, T_Sbx>(
+ this,
+ tainted_func_ptr,
+ callback_interceptor,
+ callback_trampoline,
+ unique_key);
+ return ret;
+ }
+ }
+
+  // this is an internal function invoked from macros, so it has to be public
+ template<typename T>
+ inline tainted<T*, T_Sbx> INTERNAL_get_sandbox_function_name(
+ const char* func_name)
+ {
+ return INTERNAL_get_sandbox_function_ptr<T>(
+ internal_lookup_symbol(func_name));
+ }
+
+  // this is an internal function invoked from macros, so it has to be public
+ template<typename T>
+ inline tainted<T*, T_Sbx> INTERNAL_get_sandbox_function_ptr(void* func_ptr)
+ {
+ return tainted<T*, T_Sbx>::internal_factory(reinterpret_cast<T*>(func_ptr));
+ }
+
+ /**
+ * @brief Create a "fake" pointer referring to a location in the application
+ * memory
+ *
+ * @param ptr The pointer to refer to
+ *
+ * @return The app_pointer object that refers to this location.
+ */
+ template<typename T>
+ app_pointer<T*, T_Sbx> get_app_pointer(T* ptr)
+ {
+ auto max_ptr = (typename T_Sbx::T_PointerType)(get_total_memory() - 1);
+ auto idx = app_ptr_map.get_app_pointer_idx((void*)ptr, max_ptr);
+ auto idx_as_ptr = this->template impl_get_unsandboxed_pointer<T>(idx);
+ // Right now we simply assume that any integer can be converted to a valid
+ // pointer in the sandbox This may not be true for some sandboxing mechanism
+ // plugins in the future In this case, we will have to come up with
+ // something more clever to construct indexes that look like valid pointers
+ // Add a check for now to make sure things work fine
+ detail::dynamic_check(is_pointer_in_sandbox_memory(idx_as_ptr),
+ "App pointers are not currently supported for this "
+ "rlbox sandbox plugin. Please file a bug.");
+ auto ret = app_pointer<T*, T_Sbx>(
+ &app_ptr_map, idx, reinterpret_cast<T*>(idx_as_ptr));
+ return ret;
+ }
+
+ /**
+ * @brief The mirror of get_app_pointer. Take a tainted pointer which is
+ * actually an app_pointer, and get the application location being pointed to
+ *
+ * @param tainted_ptr The tainted pointer that is actually an app_pointer
+ *
+ * @return The original location being referred to by the app_ptr
+ */
+ template<typename T>
+ T* lookup_app_ptr(tainted<T*, T_Sbx> tainted_ptr)
+ {
+ auto idx = tainted_ptr.get_raw_sandbox_value(*this);
+ void* ret = app_ptr_map.lookup_index(idx);
+ return reinterpret_cast<T*>(ret);
+ }
+
+#ifdef RLBOX_MEASURE_TRANSITION_TIMES
+ inline std::vector<rlbox_transition_timing>&
+ process_and_get_transition_times()
+ {
+ return transition_times;
+ }
+ inline int64_t get_total_ns_time_in_sandbox_and_transitions()
+ {
+ int64_t ret = 0;
+ for (auto& transition_time : transition_times) {
+ if (transition_time.invoke == rlbox_transition::INVOKE) {
+ ret += transition_time.time;
+ } else {
+ ret -= transition_time.time;
+ }
+ }
+ return ret;
+ }
+ inline void clear_transition_times() { transition_times.clear(); }
+#endif
+};
+
+#if defined(__clang__)
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
+#elif defined(__GNUC__) || defined(__GNUG__)
+// Can't turn off the variadic macro warning emitted from -pedantic so use a
+// hack to stop GCC emitting warnings for the remainder of this file
+# pragma GCC system_header
+#elif defined(_MSC_VER)
+// Doesn't seem to emit the warning
+#else
+// Don't know the compiler... just let it go through
+#endif
+
+/**
+ * @def invoke_sandbox_function
+ * @brief Call sandbox function.
+ *
+ * @param func_name The sandboxed library function to call.
+ * @param ... Arguments to function should be simple or tainted values.
+ * @return Tainted value or void.
+ */
+#ifdef RLBOX_USE_STATIC_CALLS
+
+# define sandbox_lookup_symbol_helper(prefix, func_name) prefix(func_name)
+
+# define invoke_sandbox_function(func_name, ...) \
+ template INTERNAL_invoke_with_func_ptr<decltype(func_name)>( \
+ #func_name, \
+ sandbox_lookup_symbol_helper(RLBOX_USE_STATIC_CALLS(), func_name), \
+ ##__VA_ARGS__)
+
+# define get_sandbox_function_address(func_name) \
+ template INTERNAL_get_sandbox_function_ptr<decltype(func_name)>( \
+ sandbox_lookup_symbol_helper(RLBOX_USE_STATIC_CALLS(), func_name))
+
+#else
+
+# define invoke_sandbox_function(func_name, ...) \
+ template INTERNAL_invoke_with_func_name<decltype(func_name)>( \
+ #func_name, ##__VA_ARGS__)
+
+# define get_sandbox_function_address(func_name) \
+ template INTERNAL_get_sandbox_function_name<decltype(func_name)>(#func_name)
+
+#endif
+
+#define sandbox_invoke(sandbox, func_name, ...) \
+ (sandbox).invoke_sandbox_function(func_name, ##__VA_ARGS__)
+
+#define sandbox_function_address(sandbox, func_name) \
+ (sandbox).get_sandbox_function_address(func_name)
+
+#if defined(__clang__)
+# pragma clang diagnostic pop
+#else
+#endif
+
+}
diff --git a/third_party/rlbox/include/rlbox_stdlib.hpp b/third_party/rlbox/include/rlbox_stdlib.hpp
new file mode 100644
index 0000000000..cc60428077
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_stdlib.hpp
@@ -0,0 +1,329 @@
+#pragma once
+// IWYU pragma: private, include "rlbox.hpp"
+// IWYU pragma: friend "rlbox_.*\.hpp"
+
+#include <cstring>
+#include <type_traits>
+
+#include "rlbox_helpers.hpp"
+#include "rlbox_types.hpp"
+#include "rlbox_unwrap.hpp"
+#include "rlbox_wrapper_traits.hpp"
+
+namespace rlbox {
+#define KEEP_CAST_FRIENDLY \
+ template<typename T_C_Lhs, \
+ typename T_C_Rhs, \
+ typename T_C_Sbx, \
+ template<typename, typename> \
+ typename T_C_Wrap> \
+ friend inline tainted<T_C_Lhs, T_C_Sbx> sandbox_reinterpret_cast( \
+ const T_C_Wrap<T_C_Rhs, T_C_Sbx>& rhs) noexcept; \
+ \
+ template<typename T_C_Lhs, \
+ typename T_C_Rhs, \
+ typename T_C_Sbx, \
+ template<typename, typename> \
+ typename T_C_Wrap> \
+ friend inline tainted<T_C_Lhs, T_C_Sbx> sandbox_const_cast( \
+ const T_C_Wrap<T_C_Rhs, T_C_Sbx>& rhs) noexcept; \
+ \
+ template<typename T_C_Lhs, \
+ typename T_C_Rhs, \
+ typename T_C_Sbx, \
+ template<typename, typename> \
+ typename T_C_Wrap> \
+ friend inline tainted<T_C_Lhs, T_C_Sbx> sandbox_static_cast( \
+ const T_C_Wrap<T_C_Rhs, T_C_Sbx>& rhs) noexcept;
+
+/**
+ * @brief The equivalent of a reinterpret_cast but operates on sandboxed values.
+ */
+template<typename T_Lhs,
+ typename T_Rhs,
+ typename T_Sbx,
+ template<typename, typename>
+ typename T_Wrap>
+inline tainted<T_Lhs, T_Sbx> sandbox_reinterpret_cast(
+ const T_Wrap<T_Rhs, T_Sbx>& rhs) noexcept
+{
+ static_assert(detail::rlbox_is_wrapper_v<T_Wrap<T_Rhs, T_Sbx>> &&
+ std::is_pointer_v<T_Lhs> && std::is_pointer_v<T_Rhs>,
+ "sandbox_reinterpret_cast on incompatible types");
+
+ tainted<T_Rhs, T_Sbx> taintedVal = rhs;
+ auto raw = reinterpret_cast<T_Lhs>(taintedVal.INTERNAL_unverified_safe());
+ auto ret = tainted<T_Lhs, T_Sbx>::internal_factory(raw);
+ return ret;
+}
+
+/**
+ * @brief The equivalent of a const_cast but operates on sandboxed values.
+ */
+template<typename T_Lhs,
+ typename T_Rhs,
+ typename T_Sbx,
+ template<typename, typename>
+ typename T_Wrap>
+inline tainted<T_Lhs, T_Sbx> sandbox_const_cast(
+ const T_Wrap<T_Rhs, T_Sbx>& rhs) noexcept
+{
+ static_assert(detail::rlbox_is_wrapper_v<T_Wrap<T_Rhs, T_Sbx>>,
+ "sandbox_const_cast on incompatible types");
+
+ tainted<T_Rhs, T_Sbx> taintedVal = rhs;
+ auto raw = const_cast<T_Lhs>(taintedVal.INTERNAL_unverified_safe());
+ auto ret = tainted<T_Lhs, T_Sbx>::internal_factory(raw);
+ return ret;
+}
+
+/**
+ * @brief The equivalent of a static_cast but operates on sandboxed values.
+ */
+template<typename T_Lhs,
+ typename T_Rhs,
+ typename T_Sbx,
+ template<typename, typename>
+ typename T_Wrap>
+inline tainted<T_Lhs, T_Sbx> sandbox_static_cast(
+ const T_Wrap<T_Rhs, T_Sbx>& rhs) noexcept
+{
+ static_assert(detail::rlbox_is_wrapper_v<T_Wrap<T_Rhs, T_Sbx>>,
+ "sandbox_static_cast on incompatible types");
+
+ tainted<T_Rhs, T_Sbx> taintedVal = rhs;
+ auto raw = static_cast<T_Lhs>(taintedVal.INTERNAL_unverified_safe());
+ auto ret = tainted<T_Lhs, T_Sbx>::internal_factory(raw);
+ return ret;
+}
+
+/**
+ * @brief Fill sandbox memory with a constant byte.
+ */
+template<typename T_Sbx,
+ typename T_Rhs,
+ typename T_Val,
+ typename T_Num,
+ template<typename, typename>
+ typename T_Wrap>
+inline T_Wrap<T_Rhs*, T_Sbx> memset(rlbox_sandbox<T_Sbx>& sandbox,
+ T_Wrap<T_Rhs*, T_Sbx> ptr,
+ T_Val value,
+ T_Num num)
+{
+
+ static_assert(detail::rlbox_is_tainted_or_vol_v<T_Wrap<T_Rhs, T_Sbx>>,
+ "memset called on non wrapped type");
+
+ static_assert(!std::is_const_v<T_Rhs>, "Destination is const");
+
+ auto num_val = detail::unwrap_value(num);
+ detail::dynamic_check(num_val <= sandbox.get_total_memory(),
+ "Called memset for memory larger than the sandbox");
+
+ tainted<T_Rhs*, T_Sbx> ptr_tainted = ptr;
+ void* dest_start = ptr_tainted.INTERNAL_unverified_safe();
+ detail::check_range_doesnt_cross_app_sbx_boundary<T_Sbx>(dest_start, num_val);
+
+ std::memset(dest_start, detail::unwrap_value(value), num_val);
+ return ptr;
+}
+
+/**
+ * @brief Copy to sandbox memory area. Note that memcpy is meant to be called
+ * on byte arrays and does not adjust data according to ABI differences. If the
+ * programmer does accidentally call memcpy on buffers that need ABI
+ * adjustment, this may cause compatibility issues, but will not cause a
+ * security issue as the destination is always a tainted or tainted_volatile
+ * pointer.
+ */
+template<typename T_Sbx,
+ typename T_Rhs,
+ typename T_Lhs,
+ typename T_Num,
+ template<typename, typename>
+ typename T_Wrap>
+inline T_Wrap<T_Rhs*, T_Sbx> memcpy(rlbox_sandbox<T_Sbx>& sandbox,
+ T_Wrap<T_Rhs*, T_Sbx> dest,
+ T_Lhs src,
+ T_Num num)
+{
+
+ static_assert(detail::rlbox_is_tainted_or_vol_v<T_Wrap<T_Rhs, T_Sbx>>,
+ "memcpy called on non wrapped type");
+
+ static_assert(!std::is_const_v<T_Rhs>, "Destination is const");
+
+ auto num_val = detail::unwrap_value(num);
+ detail::dynamic_check(num_val <= sandbox.get_total_memory(),
+ "Called memcpy for memory larger than the sandbox");
+
+ tainted<T_Rhs*, T_Sbx> dest_tainted = dest;
+ void* dest_start = dest_tainted.INTERNAL_unverified_safe();
+ detail::check_range_doesnt_cross_app_sbx_boundary<T_Sbx>(dest_start, num_val);
+
+  // src also needs to be checked, as we don't want to allow a src range to
+  // start inside the sandbox and end outside, and vice versa.
+  // src may or may not be a wrapper, so use unwrap_value
+ const void* src_start = detail::unwrap_value(src);
+ detail::check_range_doesnt_cross_app_sbx_boundary<T_Sbx>(src_start, num_val);
+
+ std::memcpy(dest_start, src_start, num_val);
+
+ return dest;
+}
+
+/**
+ * @brief Compare data in sandbox memory area.
+ */
+template<typename T_Sbx, typename T_Rhs, typename T_Lhs, typename T_Num>
+inline tainted_int_hint memcmp(rlbox_sandbox<T_Sbx>& sandbox,
+ T_Rhs&& dest,
+ T_Lhs&& src,
+ T_Num&& num)
+{
+ static_assert(
+ detail::rlbox_is_tainted_or_vol_v<detail::remove_cv_ref_t<T_Rhs>> ||
+ detail::rlbox_is_tainted_or_vol_v<detail::remove_cv_ref_t<T_Lhs>>,
+ "memcmp called on non wrapped type");
+
+ auto num_val = detail::unwrap_value(num);
+ detail::dynamic_check(num_val <= sandbox.get_total_memory(),
+ "Called memcmp for memory larger than the sandbox");
+
+ void* dest_start = dest.INTERNAL_unverified_safe();
+ detail::check_range_doesnt_cross_app_sbx_boundary<T_Sbx>(dest_start, num_val);
+
+  // src also needs to be checked, as we don't want to allow a src range to
+  // start inside the sandbox and end outside, and vice versa.
+  // src may or may not be a wrapper, so use unwrap_value
+ const void* src_start = detail::unwrap_value(src);
+ detail::check_range_doesnt_cross_app_sbx_boundary<T_Sbx>(src_start, num_val);
+
+ int ret = std::memcmp(dest_start, src_start, num_val);
+ tainted_int_hint converted_ret(ret);
+ return converted_ret;
+}
+
+/**
+ * @brief This function either
+ * - copies the given buffer into the sandbox calling delete on the src
+ * OR
+ * - if the sandbox allows, adds the buffer to the existing sandbox memory
+ * @param sandbox Target sandbox
+ * @param src Raw pointer to the buffer
+ * @param num Number of T-sized elements in the buffer
+ * @param free_source_on_copy If the source buffer was copied, this variable
+ * controls whether copy_memory_or_grant_access should call delete on the src.
+ * This calls delete[] if num > 1.
+ * @param copied out parameter indicating if the source was copied or transferred
+ */
+template<typename T_Sbx, typename T>
+tainted<T*, T_Sbx> copy_memory_or_grant_access(rlbox_sandbox<T_Sbx>& sandbox,
+ T* src,
+ size_t num,
+ bool free_source_on_copy,
+ bool& copied)
+{
+ copied = false;
+
+ // This function is meant for byte buffers only - so char and char16
+ static_assert(sizeof(T) <= 2);
+
+ // overflow ok
+ size_t source_size = num * sizeof(T);
+
+ // sandbox can grant access if it includes the following line
+ // using can_grant_deny_access = void;
+ if constexpr (detail::has_member_using_can_grant_deny_access_v<T_Sbx>) {
+ detail::check_range_doesnt_cross_app_sbx_boundary<T_Sbx>(src, source_size);
+
+ bool success;
+ auto ret = sandbox.INTERNAL_grant_access(src, num, success);
+ if (success) {
+ return ret;
+ }
+ }
+
+ // Malloc in sandbox takes a uint32_t as the parameter, need a bounds check
+ detail::dynamic_check(num <= std::numeric_limits<uint32_t>::max(),
+ "Granting access too large a region");
+ using T_nocv = std::remove_cv_t<T>;
+ tainted<T_nocv*, T_Sbx> copy =
+ sandbox.template malloc_in_sandbox<T_nocv>(static_cast<uint32_t>(num));
+ if (!copy) {
+ return nullptr;
+ }
+
+ rlbox::memcpy(sandbox, copy, src, source_size);
+ if (free_source_on_copy) {
+ free(const_cast<void*>(reinterpret_cast<const void*>(src)));
+ }
+
+ copied = true;
+ return sandbox_const_cast<T*>(copy);
+}
+
+/**
+ * @brief This function either
+ * - copies the given buffer out of the sandbox calling free_in_sandbox on the
+ * src
+ * OR
+ * - if the sandbox allows, moves the buffer out of existing sandbox memory
+ * @param sandbox Target sandbox
+ * @param src Raw pointer to the buffer
+ * @param num Number of T-sized elements in the buffer
+ * @param free_source_on_copy If the source buffer was copied, this variable
+ * controls whether copy_memory_or_deny_access should call delete on the src.
+ * This calls delete[] if num > 1.
+ * @param copied out parameter indicating if the source was copied or transferred
+ */
+template<typename T_Sbx,
+ typename T,
+ template<typename, typename>
+ typename T_Wrap>
+T* copy_memory_or_deny_access(rlbox_sandbox<T_Sbx>& sandbox,
+ T_Wrap<T*, T_Sbx> src,
+ size_t num,
+ bool free_source_on_copy,
+ bool& copied)
+{
+ copied = false;
+
+ // This function is meant for byte buffers only - so char and char16
+ static_assert(sizeof(T) <= 2);
+
+ // overflow ok
+ size_t source_size = num * sizeof(T);
+
+ // sandbox can grant access if it includes the following line
+ // using can_grant_deny_access = void;
+ if constexpr (detail::has_member_using_can_grant_deny_access_v<T_Sbx>) {
+ detail::check_range_doesnt_cross_app_sbx_boundary<T_Sbx>(
+ src.INTERNAL_unverified_safe(), source_size);
+
+ bool success;
+ auto ret = sandbox.INTERNAL_deny_access(src, num, success);
+ if (success) {
+ return ret;
+ }
+ }
+
+ auto copy = static_cast<T*>(malloc(source_size));
+ if (!copy) {
+ return nullptr;
+ }
+
+ tainted<T*, T_Sbx> src_tainted = src;
+ char* src_raw = src_tainted.copy_and_verify_buffer_address(
+ [](uintptr_t val) { return reinterpret_cast<char*>(val); }, num);
+ std::memcpy(copy, src_raw, source_size);
+ if (free_source_on_copy) {
+ sandbox.free_in_sandbox(src);
+ }
+
+ copied = true;
+ return copy;
+}
+
+}
diff --git a/third_party/rlbox/include/rlbox_stdlib_polyfill.hpp b/third_party/rlbox/include/rlbox_stdlib_polyfill.hpp
new file mode 100644
index 0000000000..cf9c0117d0
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_stdlib_polyfill.hpp
@@ -0,0 +1,175 @@
+#pragma once
+
+// This file is a polyfill for parts of the C++ standard library available only
+// in newer compilers. Since these are only compile time requirements, we can
+// just include these as part of the rlbox library in case the target compiler
+// doesn't support these features. For instance clang-5 which rlbox supports
+// does not support std::invocable and related functionality in <type_traits>
+// and is polyfilled here.
+//
+// This code was borrowed from clang's standard library - libc++
+//
+// Link:
+// https://github.com/llvm-mirror/libcxx/blob/master/include/type_traits
+//
+// libc++ is dual licensed under the MIT license and the UIUC License (a
+// BSD-like license) and is therefore compatible with our code base
+
+// std::invocable and friends
+
+namespace rlbox::detail::polyfill {
+
+struct __nat
+{
+ __nat() = delete;
+ __nat(const __nat&) = delete;
+ __nat& operator=(const __nat&) = delete;
+ ~__nat() = delete;
+};
+
+template<bool _Val>
+using _BoolConstant = std::integral_constant<bool, _Val>;
+
+template<class _Tp, class _Up>
+using _IsNotSame = _BoolConstant<!std::is_same<_Tp, _Up>::value>;
+
+#define INVOKE_RETURN(...) \
+ noexcept(noexcept(__VA_ARGS__))->decltype(__VA_ARGS__) { return __VA_ARGS__; }
+
+template<class _Fp, class... _Args>
+inline auto helper__invoke(_Fp&& __f, _Args&&... __args)
+ INVOKE_RETURN(std::forward<_Fp>(__f)(std::forward<_Args>(__args)...))
+
+ template<class _Fp, class... _Args>
+ inline constexpr auto helper__invoke_constexpr(_Fp&& __f, _Args&&... __args)
+ INVOKE_RETURN(std::forward<_Fp>(__f)(std::forward<_Args>(__args)...))
+
+#undef INVOKE_RETURN
+
+ // __invokable
+ template<class _Ret, class _Fp, class... _Args>
+ struct __invokable_r
+{
+ template<class _XFp, class... _XArgs>
+ static auto __try_call(int)
+ -> decltype(helper__invoke(std::declval<_XFp>(),
+ std::declval<_XArgs>()...));
+ template<class _XFp, class... _XArgs>
+ static __nat __try_call(...);
+
+ // FIXME: Check that _Ret, _Fp, and _Args... are all complete types, cv void,
+ // or incomplete array types as required by the standard.
+ using _Result = decltype(__try_call<_Fp, _Args...>(0));
+
+ using type = typename std::conditional<
+ _IsNotSame<_Result, __nat>::value,
+ typename std::conditional<std::is_void<_Ret>::value,
+ std::true_type,
+ std::is_convertible<_Result, _Ret>>::type,
+ std::false_type>::type;
+ static const bool value = type::value;
+};
+template<class _Fp, class... _Args>
+using __invokable = __invokable_r<void, _Fp, _Args...>;
+
+template<bool _IsInvokable,
+ bool _IsCVVoid,
+ class _Ret,
+ class _Fp,
+ class... _Args>
+struct __nothrow_invokable_r_imp
+{
+ static const bool value = false;
+};
+
+template<class _Ret, class _Fp, class... _Args>
+struct __nothrow_invokable_r_imp<true, false, _Ret, _Fp, _Args...>
+{
+ typedef __nothrow_invokable_r_imp _ThisT;
+
+ template<class _Tp>
+ static void __test_noexcept(_Tp) noexcept;
+
+ static const bool value = noexcept(_ThisT::__test_noexcept<_Ret>(
+ helper__invoke(std::declval<_Fp>(), std::declval<_Args>()...)));
+};
+
+template<class _Ret, class _Fp, class... _Args>
+struct __nothrow_invokable_r_imp<true, true, _Ret, _Fp, _Args...>
+{
+ static const bool value =
+ noexcept(helper__invoke(std::declval<_Fp>(), std::declval<_Args>()...));
+};
+
+template<class _Ret, class _Fp, class... _Args>
+using __nothrow_invokable_r =
+ __nothrow_invokable_r_imp<__invokable_r<_Ret, _Fp, _Args...>::value,
+ std::is_void<_Ret>::value,
+ _Ret,
+ _Fp,
+ _Args...>;
+
+template<class _Fp, class... _Args>
+using __nothrow_invokable =
+ __nothrow_invokable_r_imp<__invokable<_Fp, _Args...>::value,
+ true,
+ void,
+ _Fp,
+ _Args...>;
+
+template<class _Fp, class... _Args>
+struct helper__invoke_of
+ : public std::enable_if<__invokable<_Fp, _Args...>::value,
+ typename __invokable_r<void, _Fp, _Args...>::_Result>
+{};
+
+// invoke_result
+
+template<class _Fn, class... _Args>
+struct invoke_result : helper__invoke_of<_Fn, _Args...>
+{};
+
+template<class _Fn, class... _Args>
+using invoke_result_t = typename invoke_result<_Fn, _Args...>::type;
+
+// is_invocable
+
+template<class _Fn, class... _Args>
+struct is_invocable
+ : std::integral_constant<bool, __invokable<_Fn, _Args...>::value>
+{};
+
+template<class _Ret, class _Fn, class... _Args>
+struct is_invocable_r
+ : std::integral_constant<bool, __invokable_r<_Ret, _Fn, _Args...>::value>
+{};
+
+template<class _Fn, class... _Args>
+inline constexpr bool is_invocable_v = is_invocable<_Fn, _Args...>::value;
+
+template<class _Ret, class _Fn, class... _Args>
+inline constexpr bool is_invocable_r_v =
+ is_invocable_r<_Ret, _Fn, _Args...>::value;
+
+// is_nothrow_invocable
+
+template<class _Fn, class... _Args>
+struct is_nothrow_invocable
+ : std::integral_constant<bool, __nothrow_invokable<_Fn, _Args...>::value>
+{};
+
+template<class _Ret, class _Fn, class... _Args>
+struct is_nothrow_invocable_r
+ : std::integral_constant<bool,
+ __nothrow_invokable_r<_Ret, _Fn, _Args...>::value>
+{};
+
+template<class _Fn, class... _Args>
+inline constexpr bool is_nothrow_invocable_v =
+ is_nothrow_invocable<_Fn, _Args...>::value;
+
+template<class _Ret, class _Fn, class... _Args>
+inline constexpr bool is_nothrow_invocable_r_v =
+ is_nothrow_invocable_r<_Ret, _Fn, _Args...>::value;
+
+}
diff --git a/third_party/rlbox/include/rlbox_struct_support.hpp b/third_party/rlbox/include/rlbox_struct_support.hpp
new file mode 100644
index 0000000000..b29d92aa6e
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_struct_support.hpp
@@ -0,0 +1,353 @@
+#pragma once
+// IWYU pragma: private, include "rlbox.hpp"
+// IWYU pragma: friend "rlbox_.*\.hpp"
+
+#include <cstring>
+#include <functional>
+#include <type_traits>
+
+#include "rlbox_conversion.hpp"
+#include "rlbox_helpers.hpp"
+#include "rlbox_types.hpp"
+#include "rlbox_wrapper_traits.hpp"
+
+namespace rlbox::detail {
+
+template<typename T, typename T_Sbx, typename T_Enable = void>
+struct convert_to_sandbox_equivalent_helper;
+
+template<typename T, typename T_Sbx>
+struct convert_to_sandbox_equivalent_helper<
+ T,
+ T_Sbx,
+ std::enable_if_t<!std::is_class_v<T>>>
+{
+ using type = typename rlbox_sandbox<
+ T_Sbx>::template convert_to_sandbox_equivalent_nonclass_t<T>;
+};
+
+template<typename T, typename T_Sbx>
+using convert_to_sandbox_equivalent_t =
+ typename convert_to_sandbox_equivalent_helper<T, T_Sbx>::type;
+
+// This is used by rlbox_load_structs_from_library to test the current namespace
+struct markerStruct
+{};
+}
+
+#define helper_create_converted_field(fieldType, fieldName, isFrozen) \
+ typename detail::convert_to_sandbox_equivalent_t<fieldType, T_Sbx> fieldName;
+
+#define helper_no_op()
+
+#define sandbox_equivalent_specialization(T, libId) \
+ template<typename T_Sbx> \
+ struct Sbx_##libId##_##T \
+ { \
+ sandbox_fields_reflection_##libId##_class_##T( \
+ helper_create_converted_field, \
+ helper_no_op) \
+ }; \
+ \
+ /* add convert_to_sandbox_equivalent_t specialization for new struct */ \
+ namespace detail { \
+ template<typename T_Template, typename T_Sbx> \
+ struct convert_to_sandbox_equivalent_helper< \
+ T_Template, \
+ T_Sbx, \
+ std::enable_if_t<std::is_same_v<T_Template, T>>> \
+ { \
+ using type = Sbx_##libId##_##T<T_Sbx>; \
+ }; \
+ }
+
+#define helper_create_tainted_field( \
+ fieldType, fieldName, isFrozen, MaybeConst) \
+ MaybeConst tainted<fieldType, T_Sbx> fieldName;
+
+#define helper_create_tainted_vol_field( \
+ fieldType, fieldName, isFrozen, MaybeConst) \
+ MaybeConst tainted_volatile<fieldType, T_Sbx> fieldName;
+
+#define helper_convert_type(fieldType, fieldName, isFrozen) \
+ ::rlbox::detail::convert_type<T_Sbx, Direction, Context>( \
+ lhs.fieldName, rhs.fieldName, example_unsandboxed_ptr, sandbox_ptr);
+
+#define helper_find_example_pointer_or_null(fieldType, fieldName, isFrozen) \
+ { \
+ const void* ret = fieldName.find_example_pointer_or_null(); \
+ if (ret != nullptr) { \
+ return ret; \
+ } \
+ }
+
+#define tainted_data_specialization_helper(MaybeConst, T, libId) \
+ \
+ template<typename T_Sbx> \
+ class tainted_volatile<MaybeConst T, T_Sbx> \
+ { \
+ KEEP_CLASSES_FRIENDLY \
+ KEEP_CAST_FRIENDLY \
+ \
+ private: \
+ inline MaybeConst Sbx_##libId##_##T<T_Sbx>& \
+ get_sandbox_value_ref() noexcept \
+ { \
+ return *reinterpret_cast<MaybeConst Sbx_##libId##_##T<T_Sbx>*>(this); \
+ } \
+ \
+ inline const Sbx_##libId##_##T<T_Sbx>& get_sandbox_value_ref() \
+ const noexcept \
+ { \
+ return *reinterpret_cast<const Sbx_##libId##_##T<T_Sbx>*>(this); \
+ } \
+ \
+ inline T get_raw_value() const noexcept \
+ { \
+ T lhs; \
+ const auto& rhs = get_sandbox_value_ref(); \
+ constexpr auto Direction = \
+ detail::adjust_type_direction::TO_APPLICATION; \
+ constexpr auto Context = detail::adjust_type_context::EXAMPLE; \
+ /* This is a tainted_volatile, so its address is a valid example for use \
+ * as example_unsandboxed_ptr */ \
+ const void* example_unsandboxed_ptr = &rhs; \
+ rlbox_sandbox<T_Sbx>* sandbox_ptr = nullptr; \
+ sandbox_fields_reflection_##libId##_class_##T(helper_convert_type, \
+ helper_no_op) \
+ \
+ return lhs; \
+ } \
+ \
+    /* get_raw_sandbox_value has to return a custom struct to deal with the \
+     * adjusted machine model of the sandbox */                             \
+ inline Sbx_##libId##_##T<T_Sbx> get_raw_sandbox_value() const noexcept \
+ { \
+ auto ret_ptr = reinterpret_cast<const Sbx_##libId##_##T<T_Sbx>*>(this); \
+ return *ret_ptr; \
+ } \
+ \
+ tainted_volatile() = default; \
+ tainted_volatile(const tainted_volatile<MaybeConst T, T_Sbx>& p) = \
+ default; \
+ \
+ public: \
+ sandbox_fields_reflection_##libId##_class_##T( \
+ helper_create_tainted_vol_field, \
+ helper_no_op, \
+ MaybeConst) \
+ \
+ inline tainted<MaybeConst T*, T_Sbx> operator&() const noexcept \
+ { \
+ auto ref_cast = \
+ reinterpret_cast<MaybeConst T*>(&get_sandbox_value_ref()); \
+ auto ret = tainted<MaybeConst T*, T_Sbx>::internal_factory(ref_cast); \
+ return ret; \
+ } \
+ \
+ inline auto UNSAFE_unverified() const { return get_raw_value(); } \
+ inline auto UNSAFE_sandboxed(rlbox_sandbox<T_Sbx>& sandbox) const \
+ { \
+ return get_raw_sandbox_value(sandbox); \
+ } \
+ \
+ template<size_t N> \
+ inline auto unverified_safe_because(const char (&reason)[N]) const \
+ { \
+ RLBOX_UNUSED(reason); \
+ return UNSAFE_unverified(); \
+ } \
+ \
+ T copy_and_verify(std::function<T(tainted<T, T_Sbx>)> verifier) \
+ { \
+ tainted<T, T_Sbx> val(*this); \
+ return verifier(val); \
+ } \
+ \
+    /* Can't define this yet due to the mutually dependent definition       \
+       between tainted and tainted_volatile for structs */                  \
+ inline tainted_volatile<MaybeConst T, T_Sbx>& operator=( \
+ const tainted<T, T_Sbx>& rhs); \
+ }; \
+ \
+ template<typename T_Sbx> \
+ class tainted<MaybeConst T, T_Sbx> \
+ { \
+ KEEP_CLASSES_FRIENDLY \
+ KEEP_CAST_FRIENDLY \
+ \
+ private: \
+ inline MaybeConst T& get_raw_value_ref() noexcept \
+ { \
+ return *reinterpret_cast<MaybeConst T*>(this); \
+ } \
+ \
+ inline const T& get_raw_value_ref() const noexcept \
+ { \
+ return *reinterpret_cast<const T*>(this); \
+ } \
+ \
+ inline T get_raw_value() const noexcept \
+ { \
+ auto ret_ptr = reinterpret_cast<const T*>(this); \
+ return *ret_ptr; \
+ } \
+ \
+    /* get_raw_sandbox_value has to return a custom struct to deal with the \
+     * adjusted machine model of the sandbox */                             \
+ inline Sbx_##libId##_##T<T_Sbx> get_raw_sandbox_value( \
+ rlbox_sandbox<T_Sbx>& sandbox) const noexcept \
+ { \
+ Sbx_##libId##_##T<T_Sbx> lhs; \
+ const auto& rhs = get_raw_value_ref(); \
+ constexpr auto Direction = detail::adjust_type_direction::TO_SANDBOX; \
+ constexpr auto Context = detail::adjust_type_context::SANDBOX; \
+ const void* example_unsandboxed_ptr = nullptr; \
+ rlbox_sandbox<T_Sbx>* sandbox_ptr = &sandbox; \
+ sandbox_fields_reflection_##libId##_class_##T(helper_convert_type, \
+ helper_no_op) \
+ \
+ return lhs; \
+ } \
+ \
+ inline const void* find_example_pointer_or_null() const noexcept \
+ { \
+ sandbox_fields_reflection_##libId##_class_##T( \
+ helper_find_example_pointer_or_null, helper_no_op) \
+ \
+ return nullptr; \
+ } \
+ \
+ public: \
+ sandbox_fields_reflection_##libId##_class_##T(helper_create_tainted_field, \
+ helper_no_op, \
+ MaybeConst) \
+ \
+ tainted() = default; \
+ tainted(const tainted<MaybeConst T, T_Sbx>& p) = default; \
+ \
+ tainted(const tainted_volatile<T, T_Sbx>& p) \
+ { \
+ auto& lhs = get_raw_value_ref(); \
+ auto& rhs = p.get_sandbox_value_ref(); \
+ constexpr auto Direction = \
+ detail::adjust_type_direction::TO_APPLICATION; \
+ constexpr auto Context = detail::adjust_type_context::EXAMPLE; \
+      /* This is a tainted_volatile, so its address is valid for use as */   \
+ /* example_unsandboxed_ptr */ \
+ const void* example_unsandboxed_ptr = &rhs; \
+ rlbox_sandbox<T_Sbx>* sandbox_ptr = nullptr; \
+ sandbox_fields_reflection_##libId##_class_##T(helper_convert_type, \
+ helper_no_op) \
+ } \
+ \
+ inline tainted_opaque<MaybeConst T, T_Sbx> to_opaque() \
+ { \
+ return *reinterpret_cast<tainted_opaque<MaybeConst T, T_Sbx>*>(this); \
+ } \
+ \
+ inline auto UNSAFE_unverified() const { return get_raw_value(); } \
+ inline auto UNSAFE_sandboxed(rlbox_sandbox<T_Sbx>& sandbox) const \
+ { \
+ return get_raw_sandbox_value(sandbox); \
+ } \
+ \
+ template<size_t N> \
+ inline auto unverified_safe_because(const char (&reason)[N]) const \
+ { \
+ RLBOX_UNUSED(reason); \
+ return UNSAFE_unverified(); \
+ } \
+ \
+ T copy_and_verify(std::function<T(tainted<T, T_Sbx>)> verifier) \
+ { \
+ return verifier(*this); \
+ } \
+ }; \
+ \
+  /* Had to delay the definition due to the mutual dependence between       \
+     tainted and tainted_volatile for structs */                            \
+ template<typename T_Sbx> \
+ inline tainted_volatile<MaybeConst T, T_Sbx>& \
+ tainted_volatile<MaybeConst T, T_Sbx>::operator=( \
+ const tainted<T, T_Sbx>& rhs_wrap) \
+ { \
+ auto& lhs = get_sandbox_value_ref(); \
+ auto& rhs = rhs_wrap.get_raw_value_ref(); \
+ constexpr auto Direction = detail::adjust_type_direction::TO_SANDBOX; \
+ constexpr auto Context = detail::adjust_type_context::EXAMPLE; \
+ /* This is a tainted_volatile, so its address is a valid example for */ \
+ /* use as example_unsandboxed_ptr */ \
+ const void* example_unsandboxed_ptr = &lhs; \
+ rlbox_sandbox<T_Sbx>* sandbox_ptr = nullptr; \
+ sandbox_fields_reflection_##libId##_class_##T(helper_convert_type, \
+ helper_no_op) \
+ \
+ return *this; \
+ }
+
+#define tainted_data_specialization(T, libId) \
+ tainted_data_specialization_helper( , T, libId) \
+ tainted_data_specialization_helper(const, T, libId)
+
+#define convert_type_specialization(T, libId) \
+ namespace detail { \
+ template<typename T_Sbx, \
+ detail::adjust_type_direction Direction, \
+ adjust_type_context Context, \
+ typename T_From> \
+ class convert_type_class<T_Sbx, Direction, Context, T, T_From> \
+ { \
+ public: \
+ static inline void run(T& lhs, \
+ const T_From& rhs, \
+ const void* example_unsandboxed_ptr, \
+ rlbox_sandbox<T_Sbx>* sandbox_ptr) \
+ { \
+ sandbox_fields_reflection_##libId##_class_##T(helper_convert_type, \
+ helper_no_op) \
+ } \
+ }; \
+ \
+ template<typename T_Sbx, \
+ detail::adjust_type_direction Direction, \
+ adjust_type_context Context, \
+ typename T_From> \
+ class convert_type_class<T_Sbx, \
+ Direction, \
+ Context, \
+ Sbx_##libId##_##T<T_Sbx>, \
+ T_From> \
+ { \
+ public: \
+ static inline void run(Sbx_##libId##_##T<T_Sbx>& lhs, \
+ const T_From& rhs, \
+ const void* example_unsandboxed_ptr, \
+ rlbox_sandbox<T_Sbx>* sandbox_ptr) \
+ { \
+ sandbox_fields_reflection_##libId##_class_##T(helper_convert_type, \
+ helper_no_op) \
+ } \
+ }; \
+ }
+
+// clang-format off
+#define rlbox_load_structs_from_library(libId) \
+ namespace rlbox { \
+ /* check that this macro is called in a global namespace */ \
+ static_assert( \
+ ::rlbox::detail::is_member_of_rlbox_detail<detail::markerStruct>, \
+ "Invoke rlbox_load_structs_from_library in the global namespace"); \
+ \
+ sandbox_fields_reflection_##libId##_allClasses( \
+ sandbox_equivalent_specialization) \
+ \
+ sandbox_fields_reflection_##libId##_allClasses( \
+ tainted_data_specialization) \
+ \
+ sandbox_fields_reflection_##libId##_allClasses( \
+ convert_type_specialization) \
+ } \
+ RLBOX_REQUIRE_SEMI_COLON
+
+// clang-format on \ No newline at end of file
diff --git a/third_party/rlbox/include/rlbox_type_traits.hpp b/third_party/rlbox/include/rlbox_type_traits.hpp
new file mode 100644
index 0000000000..b32e677c32
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_type_traits.hpp
@@ -0,0 +1,546 @@
+#pragma once
+// IWYU pragma: private, include "rlbox.hpp"
+// IWYU pragma: friend "rlbox_.*\.hpp"
+
+#include <array>
+#include <type_traits>
+
+namespace rlbox::detail {
+
+#define RLBOX_ENABLE_IF(...) std::enable_if_t<__VA_ARGS__>* = nullptr
+
+template<typename T>
+constexpr bool true_v = true;
+
+template<typename T>
+constexpr bool is_fundamental_or_enum_v =
+ std::is_fundamental_v<T> || std::is_enum_v<T>;
+
+template<typename T>
+constexpr bool is_basic_type_v =
+ std::is_fundamental_v<T> || std::is_enum_v<T> || std::is_pointer_v<T>;
+
+template<typename T>
+using valid_return_t =
+ std::conditional_t<std::is_function_v<T>, void*, std::decay_t<T>>;
+
+template<typename T>
+using valid_param_t = std::conditional_t<std::is_void_v<T>, void*, T>;
+
+namespace func_first_arg_detail {
+ template<typename Ret, typename Arg, typename... Rest>
+ Arg func_first_arg_t_helper(Ret (*)(Arg, Rest...));
+
+ template<typename Ret, typename F, typename Arg, typename... Rest>
+ Arg func_first_arg_t_helper(Ret (F::*)(Arg, Rest...));
+
+ template<typename Ret, typename F, typename Arg, typename... Rest>
+ Arg func_first_arg_t_helper(Ret (F::*)(Arg, Rest...) const);
+
+ template<typename F>
+ decltype(func_first_arg_t_helper(&F::operator())) first_argument_helper(F);
+}
+
+template<typename T>
+using func_first_arg_t =
+ decltype(func_first_arg_detail::first_argument_helper(std::declval<T>()));
+
+namespace func_arg_nums_v_detail {
+ template<typename T_Ret, typename... T_Args>
+ constexpr size_t helper_two(T_Ret (*)(T_Args...))
+ {
+ return sizeof...(T_Args);
+ }
+ template<typename T_Func>
+ constexpr size_t helper()
+ {
+ constexpr T_Func* ptr = nullptr;
+ return helper_two(ptr);
+ }
+}
+
+template<typename T_Func>
+constexpr size_t func_arg_nums_v = func_arg_nums_v_detail::helper<T_Func>();
+
+template<typename T>
+using valid_array_el_t =
+ std::conditional_t<std::is_void_v<T> || std::is_function_v<T>, int, T>;
+
+template<typename T>
+constexpr bool is_func_ptr_v = (std::is_pointer_v<T> &&
+ std::is_function_v<std::remove_pointer_t<T>>) ||
+ std::is_member_function_pointer_v<T>;
+
+template<typename T>
+constexpr bool is_func_or_func_ptr = std::is_function_v<T> || is_func_ptr_v<T>;
+
+template<typename T>
+constexpr bool is_one_level_ptr_v =
+ std::is_pointer_v<T> && !std::is_pointer_v<std::remove_pointer_t<T>>;
+
+template<typename T_This, typename T_Target>
+using add_const_if_this_const_t =
+ std::conditional_t<std::is_const_v<std::remove_pointer_t<T_This>>,
+ std::add_const_t<T_Target>,
+ T_Target>;
+
+template<typename T>
+using remove_const_from_pointer = std::conditional_t<
+ std::is_pointer_v<T>,
+ std::add_pointer_t<std::remove_const_t<std::remove_pointer_t<T>>>,
+ T>;
+
+template<typename T>
+using add_const_from_pointer = std::conditional_t<
+ std::is_pointer_v<T>,
+ std::remove_pointer_t<std::add_const_t<std::remove_pointer_t<T>>>,
+ T>;
+
+template<typename T>
+using remove_cv_ref_t = std::remove_cv_t<std::remove_reference_t<T>>;
+
+template<typename T>
+using c_to_std_array_t =
+ std::conditional_t<std::is_array_v<T>,
+ std::array<std::remove_extent_t<T>, std::extent_v<T>>,
+ T>;
+
+namespace std_array_to_c_arr_detail {
+ template<typename T>
+ struct W
+ {
+ using type = T;
+ };
+
+ template<typename T, size_t N>
+ W<T[N]> std_array_to_c_arr_helper(std::array<T, N>);
+
+ template<typename T>
+ W<T> std_array_to_c_arr_helper(T&&);
+}
+
+template<typename T>
+using std_array_to_c_arr_t =
+ typename decltype(std_array_to_c_arr_detail::std_array_to_c_arr_helper(
+ std::declval<T>()))::type;
+
+template<typename T>
+using dereference_result_t =
+ std::conditional_t<std::is_pointer_v<T>,
+ std::remove_pointer_t<T>,
+ std::remove_extent_t<std_array_to_c_arr_t<T>> // is_array
+ >;
+
+template<typename T>
+using value_type_t =
+ std::conditional_t<std::is_array_v<T>, c_to_std_array_t<T>, T>;
+
+template<typename T>
+using function_ptr_t =
+ std::conditional_t<std::is_pointer_v<T> &&
+ std::is_function_v<std::remove_pointer_t<T>>,
+ T,
+ int (*)(int)>;
+
+namespace is_c_or_std_array_detail {
+ template<typename T, typename T_Enable = void>
+ struct is_c_or_std_array_helper;
+
+ template<typename T>
+ struct is_c_or_std_array_helper<T, std::enable_if_t<std::is_array_v<T>>>
+ : std::true_type
+ {};
+
+ template<typename T, size_t N>
+ std::true_type is_std_array_helper(std::array<T, N>*);
+
+ template<typename T>
+ std::false_type is_std_array_helper(T*);
+
+ template<typename T>
+ constexpr bool is_std_array_v =
+ decltype(is_std_array_helper(std::declval<std::add_pointer_t<T>>()))::value;
+
+ template<typename T>
+ struct is_c_or_std_array_helper<T, std::enable_if_t<is_std_array_v<T>>>
+ : std::true_type
+ {};
+
+ template<typename T>
+ struct is_c_or_std_array_helper<
+ T,
+ std::enable_if_t<!std::is_array_v<T> && !is_std_array_v<T>>>
+ : std::false_type
+ {};
+}
+
+template<typename T>
+constexpr bool is_std_array_v = is_c_or_std_array_detail::is_std_array_v<T>;
+
+template<typename T>
+constexpr bool is_c_or_std_array_v =
+ is_c_or_std_array_detail::is_c_or_std_array_helper<T>::value;
+
+namespace std_array_el_detail {
+ template<typename T>
+ struct W
+ {
+ using type = T;
+ };
+
+ template<typename T, size_t N>
+ W<T> is_std_array_helper(std::array<T, N>*);
+
+ template<typename T>
+ W<void> is_std_array_helper(T*);
+
+ template<typename T>
+ using std_array_el_t = decltype(std_array_el_detail::is_std_array_helper(
+    std::declval<std::add_pointer_t<T>>()));
+}
+
+template<typename T>
+using std_array_el_t = typename std_array_el_detail::std_array_el_t<T>::type;
+
+namespace all_extents_same_detail {
+
+ template<typename T1, typename T2, typename T_Enable = void>
+ struct all_extents_same_helper;
+
+ template<typename T1, typename T2>
+ struct all_extents_same_helper<
+ T1,
+ T2,
+ std::enable_if_t<std::rank_v<T1> != std::rank_v<T2>>> : std::false_type
+ {};
+
+ template<typename T1, typename T2>
+ struct all_extents_same_helper<
+ T1,
+ T2,
+ std::enable_if_t<std::rank_v<T1> == std::rank_v<T2> &&
+ !std::is_array_v<T1> && !std::is_array_v<T2>>>
+ : std::true_type
+ {};
+
+ template<typename T1, typename T2>
+ struct all_extents_same_helper<
+ T1,
+ T2,
+ std::enable_if_t<std::rank_v<T1> == std::rank_v<T2> &&
+ std::is_array_v<T1> && std::is_array_v<T2> &&
+ std::extent_v<T1> != std::extent_v<T2>>> : std::false_type
+ {};
+
+ template<typename T1, typename T2>
+ struct all_extents_same_helper<
+ T1,
+ T2,
+ std::enable_if_t<std::rank_v<T1> == std::rank_v<T2> &&
+ std::is_array_v<T1> && std::is_array_v<T2> &&
+ std::extent_v<T1> == std::extent_v<T2>>>
+ {
+ static constexpr bool value =
+ all_extents_same_helper<std::remove_extent_t<T1>,
+ std::remove_extent_t<T2>>::value;
+ };
+}
+
+template<typename T1, typename T2>
+constexpr bool all_extents_same =
+ all_extents_same_detail::all_extents_same_helper<T1, T2>::value;
+
+// remove all pointers/extent types
+namespace remove_all_pointers_detail {
+ template<typename T>
+ struct remove_all_pointers
+ {
+ typedef T type;
+ };
+
+ template<typename T>
+ struct remove_all_pointers<T*>
+ {
+ typedef typename remove_all_pointers<T>::type type;
+ };
+}
+
+template<typename T>
+using remove_all_pointers_t =
+ typename remove_all_pointers_detail::remove_all_pointers<T>::type;
+
+// remove all pointers/extent types
+namespace base_type_detail {
+ template<typename T>
+ struct base_type
+ {
+ typedef T type;
+ };
+
+ template<typename T>
+ struct base_type<T*>
+ {
+ typedef typename base_type<T>::type type;
+ };
+
+ template<typename T>
+ struct base_type<T[]>
+ {
+ typedef typename base_type<T>::type type;
+ };
+
+ template<typename T, std::size_t N>
+ struct base_type<T[N]>
+ {
+ typedef typename base_type<T>::type type;
+ };
+}
+
+template<typename T>
+using base_type_t = typename base_type_detail::base_type<T>::type;
+
+// convert types
+namespace convert_detail {
+ template<typename T,
+ typename T_ShortType,
+ typename T_IntType,
+ typename T_LongType,
+ typename T_LongLongType,
+ typename T_PointerType,
+ typename T_Enable = void>
+ struct convert_base_types_t_helper;
+
+ template<typename T,
+ typename T_ShortType,
+ typename T_IntType,
+ typename T_LongType,
+ typename T_LongLongType,
+ typename T_PointerType>
+ struct convert_base_types_t_helper<
+ T,
+ T_ShortType,
+ T_IntType,
+ T_LongType,
+ T_LongLongType,
+ T_PointerType,
+ std::enable_if_t<std::is_same_v<short, T> && !std::is_const_v<T>>>
+ {
+ using type = T_ShortType;
+ };
+
+ template<typename T,
+ typename T_ShortType,
+ typename T_IntType,
+ typename T_LongType,
+ typename T_LongLongType,
+ typename T_PointerType>
+ struct convert_base_types_t_helper<
+ T,
+ T_ShortType,
+ T_IntType,
+ T_LongType,
+ T_LongLongType,
+ T_PointerType,
+ std::enable_if_t<std::is_same_v<int, T> && !std::is_const_v<T>>>
+ {
+ using type = T_IntType;
+ };
+
+ template<typename T,
+ typename T_ShortType,
+ typename T_IntType,
+ typename T_LongType,
+ typename T_LongLongType,
+ typename T_PointerType>
+ struct convert_base_types_t_helper<
+ T,
+ T_ShortType,
+ T_IntType,
+ T_LongType,
+ T_LongLongType,
+ T_PointerType,
+ std::enable_if_t<std::is_same_v<long, T> && !std::is_const_v<T>>>
+ {
+ using type = T_LongType;
+ };
+
+ template<typename T,
+ typename T_ShortType,
+ typename T_IntType,
+ typename T_LongType,
+ typename T_LongLongType,
+ typename T_PointerType>
+ struct convert_base_types_t_helper<
+ T,
+ T_ShortType,
+ T_IntType,
+ T_LongType,
+ T_LongLongType,
+ T_PointerType,
+ std::enable_if_t<std::is_same_v<long long, T> && !std::is_const_v<T>>>
+ {
+ using type = T_LongLongType;
+ };
+
+ template<typename T,
+ typename T_ShortType,
+ typename T_IntType,
+ typename T_LongType,
+ typename T_LongLongType,
+ typename T_PointerType>
+ struct convert_base_types_t_helper<
+ T,
+ T_ShortType,
+ T_IntType,
+ T_LongType,
+ T_LongLongType,
+ T_PointerType,
+ std::enable_if_t<std::is_pointer_v<T> && !std::is_const_v<T>>>
+ {
+ using type = T_PointerType;
+ };
+
+ template<typename T,
+ typename T_ShortType,
+ typename T_IntType,
+ typename T_LongType,
+ typename T_LongLongType,
+ typename T_PointerType>
+ struct convert_base_types_t_helper<
+ T,
+ T_ShortType,
+ T_IntType,
+ T_LongType,
+ T_LongLongType,
+ T_PointerType,
+ std::enable_if_t<std::is_unsigned_v<T> && !std::is_same_v<T, bool> &&
+ !std::is_same_v<T, char> && !std::is_const_v<T> &&
+ !std::is_enum_v<T>>>
+ {
+ using type = std::make_unsigned_t<
+ typename convert_base_types_t_helper<std::make_signed_t<T>,
+ T_ShortType,
+ T_IntType,
+ T_LongType,
+ T_LongLongType,
+ T_PointerType>::type>;
+ };
+
+ template<typename T,
+ typename T_ShortType,
+ typename T_IntType,
+ typename T_LongType,
+ typename T_LongLongType,
+ typename T_PointerType>
+ struct convert_base_types_t_helper<
+ T,
+ T_ShortType,
+ T_IntType,
+ T_LongType,
+ T_LongLongType,
+ T_PointerType,
+ std::enable_if_t<(
+ std::is_same_v<bool, T> || std::is_same_v<void, T> ||
+ std::is_same_v<char, T> || std::is_same_v<signed char, T> ||
+ std::is_floating_point_v<T> || std::is_enum_v<T>)&&!std::is_const_v<T>>>
+ {
+ using type = T;
+ };
+
+ template<typename T,
+ typename T_ShortType,
+ typename T_IntType,
+ typename T_LongType,
+ typename T_LongLongType,
+ typename T_PointerType>
+ struct convert_base_types_t_helper<
+ T,
+ T_ShortType,
+ T_IntType,
+ T_LongType,
+ T_LongLongType,
+ T_PointerType,
+ std::enable_if_t<std::is_array_v<T> && !std::is_const_v<T>>>
+ {
+ using type = typename convert_base_types_t_helper<
+ std::remove_extent_t<T>,
+ T_ShortType,
+ T_IntType,
+ T_LongType,
+ T_LongLongType,
+ T_PointerType>::type[std::extent_v<T>];
+ };
+
+ template<typename T,
+ typename T_ShortType,
+ typename T_IntType,
+ typename T_LongType,
+ typename T_LongLongType,
+ typename T_PointerType>
+ struct convert_base_types_t_helper<T,
+ T_ShortType,
+ T_IntType,
+ T_LongType,
+ T_LongLongType,
+ T_PointerType,
+ std::enable_if_t<std::is_const_v<T>>>
+ {
+ using type = std::add_const_t<
+ typename convert_base_types_t_helper<std::remove_const_t<T>,
+ T_ShortType,
+ T_IntType,
+ T_LongType,
+ T_LongLongType,
+ T_PointerType>::type>;
+ };
+}
+
+template<typename T,
+ typename T_ShortType,
+ typename T_IntType,
+ typename T_LongType,
+ typename T_LongLongType,
+ typename T_PointerType>
+using convert_base_types_t =
+ typename convert_detail::convert_base_types_t_helper<T,
+ T_ShortType,
+ T_IntType,
+ T_LongType,
+ T_LongLongType,
+ T_PointerType>::type;
+
+namespace unsigned_int_of_size_t_detail {
+ template<typename T, typename T_Enable = void>
+ struct unsigned_int_of_size_t_helper;
+
+ template<typename T>
+ struct unsigned_int_of_size_t_helper<T, std::enable_if_t<sizeof(T) == 1>>
+ {
+ using type = uint8_t;
+ };
+
+ template<typename T>
+ struct unsigned_int_of_size_t_helper<T, std::enable_if_t<sizeof(T) == 2>>
+ {
+ using type = uint16_t;
+ };
+
+ template<typename T>
+ struct unsigned_int_of_size_t_helper<T, std::enable_if_t<sizeof(T) == 4>>
+ {
+ using type = uint32_t;
+ };
+
+ template<typename T>
+ struct unsigned_int_of_size_t_helper<T, std::enable_if_t<sizeof(T) == 8>>
+ {
+ using type = uint64_t;
+ };
+}
+
+template<typename T>
+using unsigned_int_of_size_t =
+ typename unsigned_int_of_size_t_detail::unsigned_int_of_size_t_helper<
+ T>::type;
+
+} \ No newline at end of file
diff --git a/third_party/rlbox/include/rlbox_types.hpp b/third_party/rlbox/include/rlbox_types.hpp
new file mode 100644
index 0000000000..b5821929da
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_types.hpp
@@ -0,0 +1,87 @@
+#pragma once
+// IWYU pragma: private, include "rlbox.hpp"
+// IWYU pragma: friend "rlbox_.*\.hpp"
+
+namespace rlbox {
+
+template<typename T, typename T_Sbx>
+class tainted_opaque
+{
+private:
+ T data{ 0 };
+
+public:
+ template<typename T2 = T>
+ void set_zero()
+ {
+ data = 0;
+ }
+};
+
+template<typename T, typename T_Sbx>
+class tainted;
+
+template<typename T, typename T_Sbx>
+class tainted_volatile;
+
+class tainted_boolean_hint;
+
+class tainted_int_hint;
+
+template<typename T_Sbx>
+class rlbox_sandbox;
+
+template<typename T, typename T_Sbx>
+class sandbox_callback;
+
+template<typename T, typename T_Sbx>
+class app_pointer;
+
+class rlbox_noop_sandbox;
+
+class rlbox_dylib_sandbox;
+}
+
+#define RLBOX_DEFINE_BASE_TYPES_FOR(SBXNAME, SBXTYPE) \
+ namespace rlbox { \
+ class rlbox_##SBXTYPE##_sandbox; \
+ } \
+ using rlbox_##SBXNAME##_sandbox_type = rlbox::rlbox_##SBXTYPE##_sandbox; \
+ using rlbox_sandbox_##SBXNAME = \
+ rlbox::rlbox_sandbox<rlbox_##SBXNAME##_sandbox_type>; \
+ template<typename T> \
+ using sandbox_callback_##SBXNAME = \
+ rlbox::sandbox_callback<T, rlbox_##SBXNAME##_sandbox_type>; \
+ template<typename T> \
+ using tainted_##SBXNAME = rlbox::tainted<T, rlbox_##SBXNAME##_sandbox_type>; \
+ template<typename T> \
+ using tainted_opaque_##SBXNAME = \
+ rlbox::tainted_opaque<T, rlbox_##SBXNAME##_sandbox_type>; \
+ template<typename T> \
+ using tainted_volatile_##SBXNAME = \
+ rlbox::tainted_volatile<T, rlbox_##SBXNAME##_sandbox_type>; \
+ using rlbox::tainted_boolean_hint; \
+ template<typename T> \
+ using app_pointer_##SBXNAME = \
+ rlbox::app_pointer<T, rlbox_##SBXNAME##_sandbox_type>;
+
+// This is like RLBOX_DEFINE_BASE_TYPES_FOR but with an explicit sandbox type
+#define RLBOX_DEFINE_BASE_TYPES_FOR_TYPE(SBXNAME, SBXTYPE) \
+ using rlbox_##SBXNAME##_sandbox_type = SBXTYPE; \
+ using rlbox_sandbox_##SBXNAME = \
+ rlbox::rlbox_sandbox<rlbox_##SBXNAME##_sandbox_type>; \
+ template<typename T> \
+ using sandbox_callback_##SBXNAME = \
+ rlbox::sandbox_callback<T, rlbox_##SBXNAME##_sandbox_type>; \
+ template<typename T> \
+ using tainted_##SBXNAME = rlbox::tainted<T, rlbox_##SBXNAME##_sandbox_type>; \
+ template<typename T> \
+ using tainted_opaque_##SBXNAME = \
+ rlbox::tainted_opaque<T, rlbox_##SBXNAME##_sandbox_type>; \
+ template<typename T> \
+ using tainted_volatile_##SBXNAME = \
+ rlbox::tainted_volatile<T, rlbox_##SBXNAME##_sandbox_type>; \
+ using rlbox::tainted_boolean_hint; \
+ template<typename T> \
+ using app_pointer_##SBXNAME = \
+ rlbox::app_pointer<T, rlbox_##SBXNAME##_sandbox_type>;
diff --git a/third_party/rlbox/include/rlbox_unwrap.hpp b/third_party/rlbox/include/rlbox_unwrap.hpp
new file mode 100644
index 0000000000..2cd00fe6ff
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_unwrap.hpp
@@ -0,0 +1,25 @@
+#pragma once
+// IWYU pragma: private, include "rlbox.hpp"
+// IWYU pragma: friend "rlbox_.*\.hpp"
+
+#include <type_traits>
+
+#include "rlbox_type_traits.hpp"
+#include "rlbox_types.hpp"
+
+namespace rlbox::detail {
+
+template<typename T_Rhs>
+inline auto unwrap_value(T_Rhs&& rhs) noexcept
+{
+ using T_RhsNoQ = detail::remove_cv_ref_t<T_Rhs>;
+ if constexpr (detail::rlbox_is_wrapper_v<T_RhsNoQ>) {
+ return rhs.INTERNAL_unverified_safe();
+ } else if constexpr (detail::rlbox_is_tainted_boolean_hint_v<T_RhsNoQ>) {
+ return rhs.INTERNAL_unverified_safe();
+ } else {
+ return rhs;
+ }
+}
+
+} \ No newline at end of file
diff --git a/third_party/rlbox/include/rlbox_wrapper_traits.hpp b/third_party/rlbox/include/rlbox_wrapper_traits.hpp
new file mode 100644
index 0000000000..45ce5a93da
--- /dev/null
+++ b/third_party/rlbox/include/rlbox_wrapper_traits.hpp
@@ -0,0 +1,171 @@
+#pragma once
+// IWYU pragma: private, include "rlbox.hpp"
+// IWYU pragma: friend "rlbox_.*\.hpp"
+
+#include <type_traits>
+
+#include "rlbox_types.hpp"
+
+namespace rlbox::detail {
+
+#define rlbox_generate_wrapper_check(name) \
+ namespace detail_rlbox_is_##name \
+ { \
+ template<typename T> \
+ struct unwrapper : std::false_type \
+ {}; \
+ \
+ template<typename T, typename T_Sbx> \
+ struct unwrapper<name<T, T_Sbx>> : std::true_type \
+ {}; \
+ } \
+ \
+ template<typename T> \
+ constexpr bool rlbox_is_##name##_v = \
+ detail_rlbox_is_##name::unwrapper<T>::value; \
+ RLBOX_REQUIRE_SEMI_COLON
+
+rlbox_generate_wrapper_check(tainted);
+rlbox_generate_wrapper_check(tainted_volatile);
+rlbox_generate_wrapper_check(tainted_opaque);
+rlbox_generate_wrapper_check(sandbox_callback);
+
+#undef rlbox_generate_wrapper_check
+
+namespace detail_rlbox_is_tainted_boolean_hint {
+ template<typename T>
+ struct unwrapper : std::false_type
+ {};
+
+ template<>
+ struct unwrapper<tainted_boolean_hint> : std::true_type
+ {};
+}
+
+template<typename T>
+constexpr bool rlbox_is_tainted_boolean_hint_v =
+ detail_rlbox_is_tainted_boolean_hint::unwrapper<T>::value;
+
+template<typename T>
+constexpr bool rlbox_is_tainted_or_vol_v =
+ rlbox_is_tainted_v<T> || rlbox_is_tainted_volatile_v<T>;
+
+template<typename T>
+constexpr bool rlbox_is_tainted_or_opaque_v =
+ rlbox_is_tainted_v<T> || rlbox_is_tainted_opaque_v<T>;
+
+// tainted_hint is NOT considered a wrapper type... This carries no particular
+// significance and is just a convention choice.
+template<typename T>
+constexpr bool rlbox_is_wrapper_v =
+ rlbox_is_tainted_v<T> || rlbox_is_tainted_volatile_v<T> ||
+ rlbox_is_tainted_opaque_v<T> || rlbox_is_sandbox_callback_v<T>;
+
+namespace detail_rlbox_remove_wrapper {
+ template<typename T>
+ struct unwrapper
+ {
+ using type = T;
+ using type_sbx = void;
+ };
+
+ template<typename T, typename T_Sbx>
+ struct unwrapper<tainted<T, T_Sbx>>
+ {
+ using type = T;
+ using type_sbx = T_Sbx;
+ };
+
+ template<typename T, typename T_Sbx>
+ struct unwrapper<tainted_volatile<T, T_Sbx>>
+ {
+ using type = T;
+ using type_sbx = T_Sbx;
+ };
+
+ template<typename T, typename T_Sbx>
+ struct unwrapper<tainted_opaque<T, T_Sbx>>
+ {
+ using type = T;
+ using type_sbx = T_Sbx;
+ };
+
+ template<typename T, typename T_Sbx>
+ struct unwrapper<sandbox_callback<T, T_Sbx>>
+ {
+ using type = T;
+ using type_sbx = T_Sbx;
+ };
+}
+
+template<typename T>
+using rlbox_remove_wrapper_t =
+ typename detail_rlbox_remove_wrapper::unwrapper<T>::type;
+
+template<typename T>
+using rlbox_get_wrapper_sandbox_t =
+ typename detail_rlbox_remove_wrapper::unwrapper<T>::type_sbx;
+
+template<typename T, typename T_Sbx>
+using rlbox_tainted_opaque_to_tainted_t =
+ std::conditional_t<rlbox_is_tainted_opaque_v<T>,
+ tainted<rlbox_remove_wrapper_t<T>, T_Sbx>,
+ T>;
+
+// https://stackoverflow.com/questions/34974844/check-if-a-type-is-from-a-particular-namespace
+namespace detail_is_member_of_rlbox_detail {
+ template<typename T, typename = void>
+ struct is_member_of_rlbox_detail_helper : std::false_type
+ {};
+
+ template<typename T>
+ struct is_member_of_rlbox_detail_helper<
+ T,
+ decltype(struct_is_member_of_rlbox_detail(std::declval<T>()))>
+ : std::true_type
+ {};
+}
+
+template<typename T>
+void struct_is_member_of_rlbox_detail(T&&);
+
+template<typename T>
+constexpr auto is_member_of_rlbox_detail =
+ detail_is_member_of_rlbox_detail::is_member_of_rlbox_detail_helper<T>::value;
+
+// https://stackoverflow.com/questions/9644477/how-to-check-whether-a-class-has-specified-nested-class-definition-or-typedef-in
+namespace detail_has_member_using_can_grant_deny_access {
+ template<class T, class Enable = void>
+ struct has_member_using_can_grant_deny_access : std::false_type
+ {};
+
+ template<class T>
+ struct has_member_using_can_grant_deny_access<
+ T,
+ std::void_t<typename T::can_grant_deny_access>> : std::true_type
+ {};
+}
+
+template<class T>
+constexpr bool has_member_using_can_grant_deny_access_v =
+ detail_has_member_using_can_grant_deny_access::
+ has_member_using_can_grant_deny_access<T>::value;
+
+namespace detail_has_member_using_needs_internal_lookup_symbol {
+ template<class T, class Enable = void>
+ struct has_member_using_needs_internal_lookup_symbol : std::false_type
+ {};
+
+ template<class T>
+ struct has_member_using_needs_internal_lookup_symbol<
+ T,
+ std::void_t<typename T::needs_internal_lookup_symbol>> : std::true_type
+ {};
+}
+
+template<class T>
+constexpr bool has_member_using_needs_internal_lookup_symbol_v =
+ detail_has_member_using_needs_internal_lookup_symbol::
+ has_member_using_needs_internal_lookup_symbol<T>::value;
+
+} \ No newline at end of file
diff --git a/third_party/rlbox/update.sh b/third_party/rlbox/update.sh
new file mode 100755
index 0000000000..28bb1eac2f
--- /dev/null
+++ b/third_party/rlbox/update.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+# Script to update the mozilla in-tree copy of the rlbox library.
+# Run this within the /third_party/rlbox directory of the source tree.
+
+MY_TEMP_DIR=`mktemp -d -t rlbox_update.XXXXXX` || exit 1
+
+git clone https://github.com/PLSysSec/rlbox_sandboxing_api ${MY_TEMP_DIR}/rlbox
+
+COMMIT=$(git -C ${MY_TEMP_DIR}/rlbox rev-parse HEAD)
+perl -p -i -e "s/\[commit [0-9a-f]{40}\]/[commit ${COMMIT}]/" README-mozilla;
+
+FILES="include"
+
+for f in $FILES; do
+ rm -rf $f
+ mv ${MY_TEMP_DIR}/rlbox/code/$f $f
+done
+
+rm -rf ${MY_TEMP_DIR}
+
+hg addremove $FILES
+
+echo "###"
+echo "### Updated rlbox to $COMMIT."
+echo "### Remember to update any newly added files to /config/external/rlbox/moz.build"
+echo "### Remember to verify and commit the changes to source control!"
+echo "###"
diff --git a/third_party/rlbox_wasm2c_sandbox/LICENSE b/third_party/rlbox_wasm2c_sandbox/LICENSE
new file mode 100755
index 0000000000..87e0ce55af
--- /dev/null
+++ b/third_party/rlbox_wasm2c_sandbox/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 UCSD PLSysSec
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/third_party/rlbox_wasm2c_sandbox/c_src/wasm2c_sandbox_wrapper.c b/third_party/rlbox_wasm2c_sandbox/c_src/wasm2c_sandbox_wrapper.c
new file mode 100755
index 0000000000..99b1e7ec56
--- /dev/null
+++ b/third_party/rlbox_wasm2c_sandbox/c_src/wasm2c_sandbox_wrapper.c
@@ -0,0 +1,7 @@
+#include <stdlib.h>
+
+int main(int argc, char *argv[]) {
+ (void) argc;
+ (void) argv;
+ abort();
+}
diff --git a/third_party/rlbox_wasm2c_sandbox/include/rlbox_wasm2c_sandbox.hpp b/third_party/rlbox_wasm2c_sandbox/include/rlbox_wasm2c_sandbox.hpp
new file mode 100644
index 0000000000..ed6975e27e
--- /dev/null
+++ b/third_party/rlbox_wasm2c_sandbox/include/rlbox_wasm2c_sandbox.hpp
@@ -0,0 +1,971 @@
+#pragma once
+
+#include "rlbox_wasm2c_tls.hpp"
+#include "wasm-rt.h"
+#include "wasm2c_rt_mem.h"
+#include "wasm2c_rt_minwasi.h"
+
+// Pull the helper header from the main repo for dynamic_check and scope_exit
+#include "rlbox_helpers.hpp"
+
+#include <cstdint>
+#include <iostream>
+#include <limits>
+#include <map>
+#include <memory>
+#include <mutex>
+// RLBox allows applications to provide a custom shared lock implementation
+#ifndef RLBOX_USE_CUSTOM_SHARED_LOCK
+# include <shared_mutex>
+#endif
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#if defined(_WIN32)
+// Ensure the min/max macros defined by windows.h don't collide with functions
+// in std::
+# ifndef NOMINMAX
+# define NOMINMAX
+# endif
+# include <windows.h>
+#else
+# include <dlfcn.h>
+#endif
+
+#define RLBOX_WASM2C_UNUSED(...) (void)__VA_ARGS__
+
+// Use the same convention as rlbox to allow applications to customize the
+// shared lock
+#ifndef RLBOX_USE_CUSTOM_SHARED_LOCK
+# define RLBOX_SHARED_LOCK(name) std::shared_timed_mutex name
+# define RLBOX_ACQUIRE_SHARED_GUARD(name, ...) \
+ std::shared_lock<std::shared_timed_mutex> name(__VA_ARGS__)
+# define RLBOX_ACQUIRE_UNIQUE_GUARD(name, ...) \
+ std::unique_lock<std::shared_timed_mutex> name(__VA_ARGS__)
+#else
+# if !defined(RLBOX_SHARED_LOCK) || !defined(RLBOX_ACQUIRE_SHARED_GUARD) || \
+ !defined(RLBOX_ACQUIRE_UNIQUE_GUARD)
+# error \
+ "RLBOX_USE_CUSTOM_SHARED_LOCK defined but missing definitions for RLBOX_SHARED_LOCK, RLBOX_ACQUIRE_SHARED_GUARD, RLBOX_ACQUIRE_UNIQUE_GUARD"
+# endif
+#endif
+
+#define DEFINE_RLBOX_WASM2C_MODULE_TYPE(modname) \
+ struct rlbox_wasm2c_module_type_##modname \
+ { \
+ using instance_t = w2c_##modname; \
+ \
+ using create_instance_t = void (*)(instance_t*, \
+ struct w2c_env*, \
+ struct w2c_wasi__snapshot__preview1*); \
+ static constexpr create_instance_t create_instance = \
+ &wasm2c_##modname##_instantiate; \
+ \
+ using free_instance_t = void (*)(instance_t*); \
+ static constexpr free_instance_t free_instance = &wasm2c_##modname##_free; \
+ \
+ using get_func_type_t = wasm_rt_func_type_t (*)(uint32_t, uint32_t, ...); \
+ static constexpr get_func_type_t get_func_type = \
+ &wasm2c_##modname##_get_func_type; \
+ \
+ static constexpr const uint64_t* initial_memory_pages = \
+ &wasm2c_##modname##_min_env_memory; \
+ static constexpr const uint8_t* is_memory_64 = \
+ &wasm2c_##modname##_is64_env_memory; \
+ static constexpr const uint32_t* initial_func_elements = \
+ &wasm2c_##modname##_min_env_0x5F_indirect_function_table; \
+ \
+ static constexpr const char* prefix = #modname; \
+ \
+ /* A function that returns the address of the func specified as a \
+ * constexpr string */ \
+ /* Unfortunately, there is no way to implement the below in C++. */ \
+ /* Implement this to fully support multiple static modules. */ \
+ /* static constexpr void* dlsym_in_w2c_module(const char* func_name) { */ \
+ /* return &w2c_##modname##_%func%; */ \
+ /* } */ \
+ \
+ static constexpr auto malloc_address = &w2c_##modname##_malloc; \
+ static constexpr auto free_address = &w2c_##modname##_free; \
+ }
+
+// wasm_module_name module name used when compiling with wasm2c
+#ifndef RLBOX_WASM2C_MODULE_NAME
+# error "Expected definition for RLBOX_WASM2C_MODULE_NAME"
+#endif
+
+// Need an extra macro to expand RLBOX_WASM2C_MODULE_NAME
+#define INVOKE_DEFINE_RLBOX_WASM2C_MODULE_TYPE(modname) \
+ DEFINE_RLBOX_WASM2C_MODULE_TYPE(modname)
+
+INVOKE_DEFINE_RLBOX_WASM2C_MODULE_TYPE(RLBOX_WASM2C_MODULE_NAME);
+
+// Concat after macro expansion
+#define RLBOX_WASM2C_CONCAT2(x, y) x##y
+#define RLBOX_WASM2C_CONCAT(x, y) RLBOX_WASM2C_CONCAT2(x, y)
+
+#define RLBOX_WASM_MODULE_TYPE_CURR \
+ RLBOX_WASM2C_CONCAT(rlbox_wasm2c_module_type_, RLBOX_WASM2C_MODULE_NAME)
+
+#define RLBOX_WASM2C_STRINGIFY(x) RLBOX_WASM2C_STRINGIFY2(x)
+#define RLBOX_WASM2C_STRINGIFY2(x) #x
+
+#define RLBOX_WASM2C_MODULE_NAME_STR \
+ RLBOX_WASM2C_STRINGIFY(RLBOX_WASM2C_MODULE_NAME)
+
+#define RLBOX_WASM2C_MODULE_FUNC_HELPER2(part1, part2, part3) \
+ part1##part2##part3
+#define RLBOX_WASM2C_MODULE_FUNC_HELPER(part1, part2, part3) \
+ RLBOX_WASM2C_MODULE_FUNC_HELPER2(part1, part2, part3)
+#define RLBOX_WASM2C_MODULE_FUNC(name) \
+ RLBOX_WASM2C_MODULE_FUNC_HELPER(w2c_, RLBOX_WASM2C_MODULE_NAME, name)
+
+namespace rlbox {
+
+namespace wasm2c_detail {
+
+ template<typename T>
+ constexpr bool false_v = false;
+
+ // https://stackoverflow.com/questions/6512019/can-we-get-the-type-of-a-lambda-argument
+ namespace return_argument_detail {
+ template<typename Ret, typename... Rest>
+ Ret helper(Ret (*)(Rest...));
+
+ template<typename Ret, typename F, typename... Rest>
+ Ret helper(Ret (F::*)(Rest...));
+
+ template<typename Ret, typename F, typename... Rest>
+ Ret helper(Ret (F::*)(Rest...) const);
+
+ template<typename F>
+ decltype(helper(&F::operator())) helper(F);
+ } // namespace return_argument_detail
+
+ template<typename T>
+ using return_argument =
+ decltype(return_argument_detail::helper(std::declval<T>()));
+
+ ///////////////////////////////////////////////////////////////
+
+ // https://stackoverflow.com/questions/37602057/why-isnt-a-for-loop-a-compile-time-expression
+ namespace compile_time_for_detail {
+ template<std::size_t N>
+ struct num
+ {
+ static const constexpr auto value = N;
+ };
+
+ template<class F, std::size_t... Is>
+ inline void compile_time_for_helper(F func, std::index_sequence<Is...>)
+ {
+ (func(num<Is>{}), ...);
+ }
+ } // namespace compile_time_for_detail
+
+ template<std::size_t N, typename F>
+ inline void compile_time_for(F func)
+ {
+ compile_time_for_detail::compile_time_for_helper(
+ func, std::make_index_sequence<N>());
+ }
+
+ ///////////////////////////////////////////////////////////////
+
  // Maps a host C/C++ type to the type used to marshal it across the wasm2c
  // ABI (`type`) plus the matching runtime type tag (`wasm2c_type`).
  // Primary template: chosen only when no specialization below matches, and
  // then fails loudly at compile time.
  template<typename T, typename = void>
  struct convert_type_to_wasm_type
  {
    static_assert(std::is_void_v<T>, "Missing specialization");
    using type = void;
    // wasm2c has no void type so use i32 for now
    static constexpr wasm_rt_type_t wasm2c_type = WASM_RT_I32;
  };

  // Integral and enum types up to 32 bits wide travel as wasm i32.
  template<typename T>
  struct convert_type_to_wasm_type<
    T,
    std::enable_if_t<(std::is_integral_v<T> || std::is_enum_v<T>)&&sizeof(T) <=
                     sizeof(uint32_t)>>
  {
    using type = uint32_t;
    static constexpr wasm_rt_type_t wasm2c_type = WASM_RT_I32;
  };

  // Integral and enum types wider than 32 but at most 64 bits travel as i64.
  template<typename T>
  struct convert_type_to_wasm_type<
    T,
    std::enable_if_t<(std::is_integral_v<T> ||
                      std::is_enum_v<T>)&&sizeof(uint32_t) < sizeof(T) &&
                     sizeof(T) <= sizeof(uint64_t)>>
  {
    using type = uint64_t;
    static constexpr wasm_rt_type_t wasm2c_type = WASM_RT_I32;
  };

  // float maps directly to wasm f32.
  template<typename T>
  struct convert_type_to_wasm_type<T,
                                   std::enable_if_t<std::is_same_v<T, float>>>
  {
    using type = T;
    static constexpr wasm_rt_type_t wasm2c_type = WASM_RT_F32;
  };

  // double maps directly to wasm f64.
  template<typename T>
  struct convert_type_to_wasm_type<T,
                                   std::enable_if_t<std::is_same_v<T, double>>>
  {
    using type = T;
    static constexpr wasm_rt_type_t wasm2c_type = WASM_RT_F64;
  };

  template<typename T>
  struct convert_type_to_wasm_type<
    T,
    std::enable_if_t<std::is_pointer_v<T> || std::is_class_v<T>>>
  {
    // pointers are 32 bit indexes in wasm
    // class parameters are passed as a pointer to an object in the stack or
    // heap
    using type = uint32_t;
    static constexpr wasm_rt_type_t wasm2c_type = WASM_RT_I32;
  };
+
+ ///////////////////////////////////////////////////////////////
+
+ namespace prepend_arg_type_detail {
+ template<typename T, typename T_ArgNew>
+ struct helper;
+
+ template<typename T_ArgNew, typename T_Ret, typename... T_Args>
+ struct helper<T_Ret(T_Args...), T_ArgNew>
+ {
+ using type = T_Ret(T_ArgNew, T_Args...);
+ };
+ }
+
+ template<typename T_Func, typename T_ArgNew>
+ using prepend_arg_type =
+ typename prepend_arg_type_detail::helper<T_Func, T_ArgNew>::type;
+
+ ///////////////////////////////////////////////////////////////
+
+ namespace change_return_type_detail {
+ template<typename T, typename T_RetNew>
+ struct helper;
+
+ template<typename T_RetNew, typename T_Ret, typename... T_Args>
+ struct helper<T_Ret(T_Args...), T_RetNew>
+ {
+ using type = T_RetNew(T_Args...);
+ };
+ }
+
+ template<typename T_Func, typename T_RetNew>
+ using change_return_type =
+ typename change_return_type_detail::helper<T_Func, T_RetNew>::type;
+
+ ///////////////////////////////////////////////////////////////
+
+ namespace change_class_arg_types_detail {
+ template<typename T, typename T_ArgNew>
+ struct helper;
+
+ template<typename T_ArgNew, typename T_Ret, typename... T_Args>
+ struct helper<T_Ret(T_Args...), T_ArgNew>
+ {
+ using type =
+ T_Ret(std::conditional_t<std::is_class_v<T_Args>, T_ArgNew, T_Args>...);
+ };
+ }
+
+ template<typename T_Func, typename T_ArgNew>
+ using change_class_arg_types =
+ typename change_class_arg_types_detail::helper<T_Func, T_ArgNew>::type;
+
+} // namespace wasm2c_detail
+
// declare the static symbol with weak linkage to keep this header only
// (selectany on MSVC / weak elsewhere lets every translation unit that
// includes this header share a single definition of the once_flag, which
// guards one-time wasm runtime initialization in impl_create_sandbox)
#if defined(_MSC_VER)
__declspec(selectany)
#else
__attribute__((weak))
#endif
  std::once_flag rlbox_wasm2c_initialized;
+
+class rlbox_wasm2c_sandbox
+{
+public:
+ using T_LongLongType = int64_t;
+ using T_LongType = int32_t;
+ using T_IntType = int32_t;
+ using T_PointerType = uint32_t;
+ using T_ShortType = int16_t;
+
+private:
+ mutable typename RLBOX_WASM_MODULE_TYPE_CURR::instance_t wasm2c_instance{ 0 };
+ struct w2c_env sandbox_memory_env;
+ struct w2c_wasi__snapshot__preview1 wasi_env;
+ bool instance_initialized = false;
+ wasm_rt_memory_t sandbox_memory_info;
+ mutable wasm_rt_funcref_table_t sandbox_callback_table;
+ uintptr_t heap_base;
+ size_t return_slot_size = 0;
+ T_PointerType return_slot = 0;
+ mutable std::vector<T_PointerType> callback_free_list;
+
+ static const size_t MAX_CALLBACKS = 128;
+ mutable RLBOX_SHARED_LOCK(callback_mutex);
+ void* callback_unique_keys[MAX_CALLBACKS]{ 0 };
+ void* callbacks[MAX_CALLBACKS]{ 0 };
+ uint32_t callback_slot_assignment[MAX_CALLBACKS]{ 0 };
+ mutable std::map<const void*, uint32_t> internal_callbacks;
+ mutable std::map<uint32_t, const void*> slot_assignments;
+
+#ifndef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+ thread_local static inline rlbox_wasm2c_sandbox_thread_data thread_data{ 0,
+ 0 };
+#endif
+
+ template<typename T_FormalRet, typename T_ActualRet>
+ inline auto serialize_to_sandbox(T_ActualRet arg)
+ {
+ if constexpr (std::is_class_v<T_FormalRet>) {
+ // structs returned as pointers into wasm memory/wasm stack
+ auto ptr = reinterpret_cast<T_FormalRet*>(
+ impl_get_unsandboxed_pointer<T_FormalRet*>(arg));
+ T_FormalRet ret = *ptr;
+ return ret;
+ } else {
+ return arg;
+ }
+ }
+
+ template<uint32_t N, typename T_Ret, typename... T_Args>
+ static typename wasm2c_detail::convert_type_to_wasm_type<T_Ret>::type
+ callback_interceptor(
+ void* /* vmContext */,
+ typename wasm2c_detail::convert_type_to_wasm_type<T_Args>::type... params)
+ {
+#ifdef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+ auto& thread_data = *get_rlbox_wasm2c_sandbox_thread_data();
+#endif
+ thread_data.last_callback_invoked = N;
+ using T_Func = T_Ret (*)(T_Args...);
+ T_Func func;
+ {
+#ifndef RLBOX_SINGLE_THREADED_INVOCATIONS
+ RLBOX_ACQUIRE_SHARED_GUARD(lock, thread_data.sandbox->callback_mutex);
+#endif
+ func = reinterpret_cast<T_Func>(thread_data.sandbox->callbacks[N]);
+ }
+ // Callbacks are invoked through function pointers, cannot use std::forward
+ // as we don't have caller context for T_Args, which means they are all
+ // effectively passed by value
+ return func(
+ thread_data.sandbox->template serialize_to_sandbox<T_Args>(params)...);
+ }
+
+ template<uint32_t N, typename T_Ret, typename... T_Args>
+ static void callback_interceptor_promoted(
+ void* /* vmContext */,
+ typename wasm2c_detail::convert_type_to_wasm_type<T_Ret>::type ret,
+ typename wasm2c_detail::convert_type_to_wasm_type<T_Args>::type... params)
+ {
+#ifdef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+ auto& thread_data = *get_rlbox_wasm2c_sandbox_thread_data();
+#endif
+ thread_data.last_callback_invoked = N;
+ using T_Func = T_Ret (*)(T_Args...);
+ T_Func func;
+ {
+#ifndef RLBOX_SINGLE_THREADED_INVOCATIONS
+ RLBOX_ACQUIRE_SHARED_GUARD(lock, thread_data.sandbox->callback_mutex);
+#endif
+ func = reinterpret_cast<T_Func>(thread_data.sandbox->callbacks[N]);
+ }
+ // Callbacks are invoked through function pointers, cannot use std::forward
+ // as we don't have caller context for T_Args, which means they are all
+ // effectively passed by value
+ auto ret_val = func(
+ thread_data.sandbox->template serialize_to_sandbox<T_Args>(params)...);
+ // Copy the return value back
+ auto ret_ptr = reinterpret_cast<T_Ret*>(
+ thread_data.sandbox->template impl_get_unsandboxed_pointer<T_Ret*>(ret));
+ *ret_ptr = ret_val;
+ }
+
+ template<typename T_Ret, typename... T_Args>
+ inline wasm_rt_func_type_t get_wasm2c_func_index(
+ // dummy for template inference
+ T_Ret (*)(T_Args...) = nullptr) const
+ {
+ // Class return types as promoted to args
+ constexpr bool promoted = std::is_class_v<T_Ret>;
+ constexpr uint32_t param_count =
+ promoted ? (sizeof...(T_Args) + 1) : (sizeof...(T_Args));
+ constexpr uint32_t ret_count =
+ promoted ? 0 : (std::is_void_v<T_Ret> ? 0 : 1);
+
+ wasm_rt_func_type_t ret = nullptr;
+ if constexpr (ret_count == 0) {
+ ret = RLBOX_WASM_MODULE_TYPE_CURR::get_func_type(
+ param_count,
+ ret_count,
+ wasm2c_detail::convert_type_to_wasm_type<T_Args>::wasm2c_type...);
+ } else {
+ ret = RLBOX_WASM_MODULE_TYPE_CURR::get_func_type(
+ param_count,
+ ret_count,
+ wasm2c_detail::convert_type_to_wasm_type<T_Args>::wasm2c_type...,
+ wasm2c_detail::convert_type_to_wasm_type<T_Ret>::wasm2c_type);
+ }
+
+ return ret;
+ }
+
+ void ensure_return_slot_size(size_t size)
+ {
+ if (size > return_slot_size) {
+ if (return_slot_size) {
+ impl_free_in_sandbox(return_slot);
+ }
+ return_slot = impl_malloc_in_sandbox(size);
+ detail::dynamic_check(
+ return_slot != 0,
+ "Error initializing return slot. Sandbox may be out of memory!");
+ return_slot_size = size;
+ }
+ }
+
+ // function takes a 32-bit value and returns the next power of 2
+ // return is a 64-bit value as large 32-bit values will return 2^32
+ static inline uint64_t next_power_of_two(uint32_t value)
+ {
+ uint64_t power = 1;
+ while (power < value) {
+ power *= 2;
+ }
+ return power;
+ }
+
+protected:
+#define rlbox_wasm2c_sandbox_lookup_symbol(func_name) \
+ reinterpret_cast<void*>(&RLBOX_WASM2C_MODULE_FUNC(_##func_name)) /* NOLINT \
+ */
+
+ // adding a template so that we can use static_assert to fire only if this
+ // function is invoked
+ template<typename T = void>
+ void* impl_lookup_symbol(const char* func_name)
+ {
+ constexpr bool fail = std::is_same_v<T, void>;
+ static_assert(
+ !fail,
+ "The wasm2c_sandbox uses static calls and thus developers should add\n\n"
+ "#define RLBOX_USE_STATIC_CALLS() rlbox_wasm2c_sandbox_lookup_symbol\n\n"
+ "to their code, to ensure that static calls are handled correctly.");
+ return nullptr;
+ }
+
+public:
+#define FALLIBLE_DYNAMIC_CHECK(infallible, cond, msg) \
+ if (infallible) { \
+ detail::dynamic_check(cond, msg); \
+ } else if (!(cond)) { \
+ impl_destroy_sandbox(); \
+ return false; \
+ }
+
+ /**
+ * @brief creates the Wasm sandbox from the given shared library
+ *
+ * @param infallible if set to true, the sandbox aborts on failure. If false,
+ * the sandbox returns creation status as a return value
+ * @param custom_capacity allows optionally overriding the platform-specified
+ * maximum size of the wasm heap allowed for this sandbox instance.
+ * @return true when sandbox is successfully created. false when infallible is
+ * set to false and sandbox was not successfully created. If infallible is set
+ * to true, this function will never return false.
+ */
+ inline bool impl_create_sandbox(
+ bool infallible = true,
+ const w2c_mem_capacity* custom_capacity = nullptr)
+ {
+ FALLIBLE_DYNAMIC_CHECK(
+ infallible, instance_initialized == false, "Sandbox already initialized");
+
+ bool minwasi_init_succeeded = true;
+
+ std::call_once(rlbox_wasm2c_initialized, [&]() {
+ wasm_rt_init();
+ minwasi_init_succeeded = minwasi_init();
+ });
+
+ FALLIBLE_DYNAMIC_CHECK(
+ infallible, minwasi_init_succeeded, "Could not initialize min wasi");
+
+ const bool minwasi_init_inst_succeeded = minwasi_init_instance(&wasi_env);
+ FALLIBLE_DYNAMIC_CHECK(
+ infallible, minwasi_init_inst_succeeded, "Could not initialize min wasi instance");
+
+ if (custom_capacity) {
+ FALLIBLE_DYNAMIC_CHECK(
+ infallible, custom_capacity->is_valid, "Invalid capacity");
+ }
+
+ sandbox_memory_info = create_wasm2c_memory(
+ *RLBOX_WASM_MODULE_TYPE_CURR::initial_memory_pages, custom_capacity);
+ FALLIBLE_DYNAMIC_CHECK(infallible,
+ sandbox_memory_info.data != nullptr,
+ "Could not allocate a heap for the wasm2c sandbox");
+
+ FALLIBLE_DYNAMIC_CHECK(infallible,
+ *RLBOX_WASM_MODULE_TYPE_CURR::is_memory_64 == 0,
+ "Does not support Wasm with memory64");
+
+ const uint32_t max_table_size = 0xffffffffu; /* this means unlimited */
+ wasm_rt_allocate_funcref_table(
+ &sandbox_callback_table,
+ *RLBOX_WASM_MODULE_TYPE_CURR::initial_func_elements,
+ max_table_size);
+
+ sandbox_memory_env.sandbox_memory_info = &sandbox_memory_info;
+ sandbox_memory_env.sandbox_callback_table = &sandbox_callback_table;
+ wasi_env.instance_memory = &sandbox_memory_info;
+ RLBOX_WASM_MODULE_TYPE_CURR::create_instance(
+ &wasm2c_instance, &sandbox_memory_env, &wasi_env);
+
+ heap_base = reinterpret_cast<uintptr_t>(impl_get_memory_location());
+
+ if constexpr (sizeof(uintptr_t) != sizeof(uint32_t)) {
+ // On larger platforms, check that the heap is aligned to the pointer size
+ // i.e. 32-bit pointer => aligned to 4GB. The implementations of
+ // impl_get_unsandboxed_pointer_no_ctx and
+ // impl_get_sandboxed_pointer_no_ctx below rely on this.
+ uintptr_t heap_offset_mask = std::numeric_limits<T_PointerType>::max();
+ FALLIBLE_DYNAMIC_CHECK(infallible,
+ (heap_base & heap_offset_mask) == 0,
+ "Sandbox heap not aligned to 4GB");
+ }
+
+ instance_initialized = true;
+
+ return true;
+ }
+
+#undef FALLIBLE_DYNAMIC_CHECK
+
+ inline void impl_destroy_sandbox()
+ {
+ if (return_slot_size) {
+ impl_free_in_sandbox(return_slot);
+ }
+
+ if (instance_initialized) {
+ instance_initialized = false;
+ RLBOX_WASM_MODULE_TYPE_CURR::free_instance(&wasm2c_instance);
+ }
+
+ destroy_wasm2c_memory(&sandbox_memory_info);
+ wasm_rt_free_funcref_table(&sandbox_callback_table);
+ minwasi_cleanup_instance(&wasi_env);
+ }
+
+ template<typename T>
+ inline void* impl_get_unsandboxed_pointer(T_PointerType p) const
+ {
+ if constexpr (std::is_function_v<std::remove_pointer_t<T>>) {
+ RLBOX_ACQUIRE_UNIQUE_GUARD(lock, callback_mutex);
+ auto found = slot_assignments.find(p);
+ if (found != slot_assignments.end()) {
+ auto ret = found->second;
+ return const_cast<void*>(ret);
+ } else {
+ return nullptr;
+ }
+ } else {
+ return reinterpret_cast<void*>(heap_base + p);
+ }
+ }
+
+ template<typename T>
+ inline T_PointerType impl_get_sandboxed_pointer(const void* p) const
+ {
+ if constexpr (std::is_function_v<std::remove_pointer_t<T>>) {
+ RLBOX_ACQUIRE_UNIQUE_GUARD(lock, callback_mutex);
+
+ uint32_t slot_number = 0;
+ auto found = internal_callbacks.find(p);
+ if (found != internal_callbacks.end()) {
+ slot_number = found->second;
+ } else {
+
+ slot_number = new_callback_slot();
+ wasm_rt_funcref_t func_val;
+ func_val.func_type = get_wasm2c_func_index(static_cast<T>(nullptr));
+ func_val.func =
+ reinterpret_cast<wasm_rt_function_ptr_t>(const_cast<void*>(p));
+ func_val.module_instance = &wasm2c_instance;
+
+ sandbox_callback_table.data[slot_number] = func_val;
+ internal_callbacks[p] = slot_number;
+ slot_assignments[slot_number] = p;
+ }
+ return static_cast<T_PointerType>(slot_number);
+ } else {
+ if constexpr (sizeof(uintptr_t) == sizeof(uint32_t)) {
+ return static_cast<T_PointerType>(reinterpret_cast<uintptr_t>(p) -
+ heap_base);
+ } else {
+ return static_cast<T_PointerType>(reinterpret_cast<uintptr_t>(p));
+ }
+ }
+ }
+
+ template<typename T>
+ static inline void* impl_get_unsandboxed_pointer_no_ctx(
+ T_PointerType p,
+ const void* example_unsandboxed_ptr,
+ rlbox_wasm2c_sandbox* (*expensive_sandbox_finder)(
+ const void* example_unsandboxed_ptr))
+ {
+ // on 32-bit platforms we don't assume the heap is aligned
+ if constexpr (sizeof(uintptr_t) == sizeof(uint32_t)) {
+ auto sandbox = expensive_sandbox_finder(example_unsandboxed_ptr);
+ return sandbox->template impl_get_unsandboxed_pointer<T>(p);
+ } else {
+ if constexpr (std::is_function_v<std::remove_pointer_t<T>>) {
+ // swizzling function pointers needs access to the function pointer
+ // tables and thus cannot be done without context
+ auto sandbox = expensive_sandbox_finder(example_unsandboxed_ptr);
+ return sandbox->template impl_get_unsandboxed_pointer<T>(p);
+ } else {
+ // grab the memory base from the example_unsandboxed_ptr
+ uintptr_t heap_base_mask =
+ std::numeric_limits<uintptr_t>::max() &
+ ~(static_cast<uintptr_t>(std::numeric_limits<T_PointerType>::max()));
+ uintptr_t computed_heap_base =
+ reinterpret_cast<uintptr_t>(example_unsandboxed_ptr) & heap_base_mask;
+ uintptr_t ret = computed_heap_base | p;
+ return reinterpret_cast<void*>(ret);
+ }
+ }
+ }
+
+ template<typename T>
+ static inline T_PointerType impl_get_sandboxed_pointer_no_ctx(
+ const void* p,
+ const void* example_unsandboxed_ptr,
+ rlbox_wasm2c_sandbox* (*expensive_sandbox_finder)(
+ const void* example_unsandboxed_ptr))
+ {
+ // on 32-bit platforms we don't assume the heap is aligned
+ if constexpr (sizeof(uintptr_t) == sizeof(uint32_t)) {
+ auto sandbox = expensive_sandbox_finder(example_unsandboxed_ptr);
+ return sandbox->template impl_get_sandboxed_pointer<T>(p);
+ } else {
+ if constexpr (std::is_function_v<std::remove_pointer_t<T>>) {
+ // swizzling function pointers needs access to the function pointer
+ // tables and thus cannot be done without context
+ auto sandbox = expensive_sandbox_finder(example_unsandboxed_ptr);
+ return sandbox->template impl_get_sandboxed_pointer<T>(p);
+ } else {
+ // Just clear the memory base to leave the offset
+ RLBOX_WASM2C_UNUSED(example_unsandboxed_ptr);
+ uintptr_t ret = reinterpret_cast<uintptr_t>(p) &
+ std::numeric_limits<T_PointerType>::max();
+ return static_cast<T_PointerType>(ret);
+ }
+ }
+ }
+
+ static inline bool impl_is_in_same_sandbox(const void* p1, const void* p2)
+ {
+ uintptr_t heap_base_mask = std::numeric_limits<uintptr_t>::max() &
+ ~(std::numeric_limits<T_PointerType>::max());
+ return (reinterpret_cast<uintptr_t>(p1) & heap_base_mask) ==
+ (reinterpret_cast<uintptr_t>(p2) & heap_base_mask);
+ }
+
+ inline bool impl_is_pointer_in_sandbox_memory(const void* p)
+ {
+ size_t length = impl_get_total_memory();
+ uintptr_t p_val = reinterpret_cast<uintptr_t>(p);
+ return p_val >= heap_base && p_val < (heap_base + length);
+ }
+
+ inline bool impl_is_pointer_in_app_memory(const void* p)
+ {
+ return !(impl_is_pointer_in_sandbox_memory(p));
+ }
+
+ inline size_t impl_get_total_memory() { return sandbox_memory_info.size; }
+
+ inline void* impl_get_memory_location() const
+ {
+ return sandbox_memory_info.data;
+ }
+
+ template<typename T, typename T_Converted, typename... T_Args>
+ auto impl_invoke_with_func_ptr(T_Converted* func_ptr, T_Args&&... params)
+ {
+#ifdef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+ auto& thread_data = *get_rlbox_wasm2c_sandbox_thread_data();
+#endif
+ auto old_sandbox = thread_data.sandbox;
+ thread_data.sandbox = this;
+ auto on_exit =
+ detail::make_scope_exit([&] { thread_data.sandbox = old_sandbox; });
+
+ // WASM functions are mangled in the following manner
+ // 1. All primitive types are left as is and follow an LP32 machine model
+ // (as opposed to the possibly 64-bit application)
+ // 2. All pointers are changed to u32 types
+ // 3. Returned class are returned as an out parameter before the actual
+ // function parameters
+ // 4. All class parameters are passed as pointers (u32 types)
+ // 5. The heap address is passed in as the first argument to the function
+ //
+ // RLBox accounts for the first 2 differences in T_Converted type, but we
+ // need to handle the rest
+
+ // Handle point 3
+ using T_Ret = wasm2c_detail::return_argument<T_Converted>;
+ if constexpr (std::is_class_v<T_Ret>) {
+ using T_Conv1 = wasm2c_detail::change_return_type<T_Converted, void>;
+ using T_Conv2 = wasm2c_detail::prepend_arg_type<T_Conv1, T_PointerType>;
+ auto func_ptr_conv =
+ reinterpret_cast<T_Conv2*>(reinterpret_cast<uintptr_t>(func_ptr));
+ ensure_return_slot_size(sizeof(T_Ret));
+ impl_invoke_with_func_ptr<T>(func_ptr_conv, return_slot, params...);
+
+ auto ptr = reinterpret_cast<T_Ret*>(
+ impl_get_unsandboxed_pointer<T_Ret*>(return_slot));
+ T_Ret ret = *ptr;
+ return ret;
+ }
+
+ // Handle point 4
+ constexpr size_t alloc_length = [&] {
+ if constexpr (sizeof...(params) > 0) {
+ return ((std::is_class_v<T_Args> ? 1 : 0) + ...);
+ } else {
+ return 0;
+ }
+ }();
+
+ // 0 arg functions create 0 length arrays which is not allowed
+ T_PointerType allocations_buff[alloc_length == 0 ? 1 : alloc_length];
+ T_PointerType* allocations = allocations_buff;
+
+ auto serialize_class_arg =
+ [&](auto arg) -> std::conditional_t<std::is_class_v<decltype(arg)>,
+ T_PointerType,
+ decltype(arg)> {
+ using T_Arg = decltype(arg);
+ if constexpr (std::is_class_v<T_Arg>) {
+ auto slot = impl_malloc_in_sandbox(sizeof(T_Arg));
+ auto ptr =
+ reinterpret_cast<T_Arg*>(impl_get_unsandboxed_pointer<T_Arg*>(slot));
+ *ptr = arg;
+ allocations[0] = slot;
+ allocations++;
+ return slot;
+ } else {
+ return arg;
+ }
+ };
+
+ // 0 arg functions don't use serialize
+ RLBOX_WASM2C_UNUSED(serialize_class_arg);
+
+ using T_ConvNoClass =
+ wasm2c_detail::change_class_arg_types<T_Converted, T_PointerType>;
+
+ // Handle Point 5
+ using T_ConvHeap = wasm2c_detail::prepend_arg_type<
+ T_ConvNoClass,
+ typename RLBOX_WASM_MODULE_TYPE_CURR::instance_t*>;
+
+ // Function invocation
+ auto func_ptr_conv =
+ reinterpret_cast<T_ConvHeap*>(reinterpret_cast<uintptr_t>(func_ptr));
+
+ using T_NoVoidRet =
+ std::conditional_t<std::is_void_v<T_Ret>, uint32_t, T_Ret>;
+ T_NoVoidRet ret;
+
+ if constexpr (std::is_void_v<T_Ret>) {
+ RLBOX_WASM2C_UNUSED(ret);
+ func_ptr_conv(&wasm2c_instance, serialize_class_arg(params)...);
+ } else {
+ ret = func_ptr_conv(&wasm2c_instance, serialize_class_arg(params)...);
+ }
+
+ for (size_t i = 0; i < alloc_length; i++) {
+ impl_free_in_sandbox(allocations_buff[i]);
+ }
+
+ if constexpr (!std::is_void_v<T_Ret>) {
+ return ret;
+ }
+ }
+
+ inline T_PointerType impl_malloc_in_sandbox(size_t size)
+ {
+ if constexpr (sizeof(size) > sizeof(uint32_t)) {
+ detail::dynamic_check(size <= std::numeric_limits<uint32_t>::max(),
+ "Attempting to malloc more than the heap size");
+ }
+ using T_Func = void*(size_t);
+ using T_Converted = T_PointerType(uint32_t);
+ T_PointerType ret = impl_invoke_with_func_ptr<T_Func, T_Converted>(
+ reinterpret_cast<T_Converted*>(
+ RLBOX_WASM_MODULE_TYPE_CURR::malloc_address),
+ static_cast<uint32_t>(size));
+ return ret;
+ }
+
+ inline void impl_free_in_sandbox(T_PointerType p)
+ {
+ using T_Func = void(void*);
+ using T_Converted = void(T_PointerType);
+ impl_invoke_with_func_ptr<T_Func, T_Converted>(
+ reinterpret_cast<T_Converted*>(RLBOX_WASM_MODULE_TYPE_CURR::free_address),
+ p);
+ }
+
+private:
+ // Should be called with callback_mutex held
+ uint32_t new_callback_slot() const
+ {
+ if (callback_free_list.size() > 0) {
+ uint32_t ret = callback_free_list.back();
+ callback_free_list.pop_back();
+ return ret;
+ }
+
+ const uint32_t curr_size = sandbox_callback_table.size;
+
+ detail::dynamic_check(
+ curr_size < sandbox_callback_table.max_size,
+ "Could not find an empty row in Wasm instance table. This would "
+ "happen if you have registered too many callbacks, or unsandboxed "
+ "too many function pointers.");
+
+ wasm_rt_funcref_t func_val{ 0 };
+ // on success, this returns the previous number of elements in the table
+ const uint32_t ret =
+ wasm_rt_grow_funcref_table(&sandbox_callback_table, 1, func_val);
+
+ detail::dynamic_check(
+ ret != 0 && ret != (uint32_t)-1,
+ "Adding a new callback slot to the wasm instance failed.");
+
+ // We have expanded the number of slots
+ // Previous slots size: ret
+ // New slot is at index: ret
+ const uint32_t slot_number = ret;
+ return slot_number;
+ }
+
+ void free_callback_slot(uint32_t slot) const
+ {
+ callback_free_list.push_back(slot);
+ }
+
+public:
+ template<typename T_Ret, typename... T_Args>
+ inline T_PointerType impl_register_callback(void* key, void* callback)
+ {
+ bool found = false;
+ uint32_t found_loc = 0;
+ wasm_rt_function_ptr_t chosen_interceptor = nullptr;
+
+ RLBOX_ACQUIRE_UNIQUE_GUARD(lock, callback_mutex);
+
+ // need a compile time for loop as we we need I to be a compile time value
+ // this is because we are setting the I'th callback ineterceptor
+ wasm2c_detail::compile_time_for<MAX_CALLBACKS>([&](auto I) {
+ constexpr auto i = I.value;
+ if (!found && callbacks[i] == nullptr) {
+ found = true;
+ found_loc = i;
+
+ if constexpr (std::is_class_v<T_Ret>) {
+ chosen_interceptor = (wasm_rt_function_ptr_t)(
+ callback_interceptor_promoted<i, T_Ret, T_Args...>);
+ } else {
+ chosen_interceptor =
+ (wasm_rt_function_ptr_t)(callback_interceptor<i, T_Ret, T_Args...>);
+ }
+ }
+ });
+
+ detail::dynamic_check(
+ found,
+ "Could not find an empty slot in sandbox function table. This would "
+ "happen if you have registered too many callbacks, or unsandboxed "
+ "too many function pointers. You can file a bug if you want to "
+ "increase the maximum allowed callbacks or unsadnboxed functions "
+ "pointers");
+
+ wasm_rt_funcref_t func_val;
+ func_val.func_type = get_wasm2c_func_index<T_Ret, T_Args...>();
+ func_val.func = chosen_interceptor;
+ func_val.module_instance = &wasm2c_instance;
+
+ const uint32_t slot_number = new_callback_slot();
+ sandbox_callback_table.data[slot_number] = func_val;
+
+ callback_unique_keys[found_loc] = key;
+ callbacks[found_loc] = callback;
+ callback_slot_assignment[found_loc] = slot_number;
+ slot_assignments[slot_number] = callback;
+
+ return static_cast<T_PointerType>(slot_number);
+ }
+
+ static inline std::pair<rlbox_wasm2c_sandbox*, void*>
+ impl_get_executed_callback_sandbox_and_key()
+ {
+#ifdef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES
+ auto& thread_data = *get_rlbox_wasm2c_sandbox_thread_data();
+#endif
+ auto sandbox = thread_data.sandbox;
+ auto callback_num = thread_data.last_callback_invoked;
+ void* key = sandbox->callback_unique_keys[callback_num];
+ return std::make_pair(sandbox, key);
+ }
+
+ template<typename T_Ret, typename... T_Args>
+ inline void impl_unregister_callback(void* key)
+ {
+ bool found = false;
+ uint32_t i = 0;
+ {
+ RLBOX_ACQUIRE_UNIQUE_GUARD(lock, callback_mutex);
+ for (; i < MAX_CALLBACKS; i++) {
+ if (callback_unique_keys[i] == key) {
+ const uint32_t slot_number = callback_slot_assignment[i];
+ wasm_rt_funcref_t func_val{ 0 };
+ sandbox_callback_table.data[slot_number] = func_val;
+
+ callback_unique_keys[i] = nullptr;
+ callbacks[i] = nullptr;
+ callback_slot_assignment[i] = 0;
+ found = true;
+ break;
+ }
+ }
+ }
+
+ detail::dynamic_check(
+ found, "Internal error: Could not find callback to unregister");
+
+ return;
+ }
+};
+
+} // namespace rlbox
diff --git a/third_party/rlbox_wasm2c_sandbox/include/rlbox_wasm2c_tls.hpp b/third_party/rlbox_wasm2c_sandbox/include/rlbox_wasm2c_tls.hpp
new file mode 100644
index 0000000000..b87ac7bf5f
--- /dev/null
+++ b/third_party/rlbox_wasm2c_sandbox/include/rlbox_wasm2c_tls.hpp
@@ -0,0 +1,33 @@
+#pragma once
+
+#include <stdint.h>
+
namespace rlbox {

class rlbox_wasm2c_sandbox;

// Per-thread bookkeeping used while a sandboxed call or callback is on the
// stack: which sandbox instance is active on this thread and which callback
// slot fired most recently.
struct rlbox_wasm2c_sandbox_thread_data
{
  // sandbox whose code is currently executing on this thread
  rlbox_wasm2c_sandbox* sandbox;
  // index of the most recently invoked callback interceptor
  uint32_t last_callback_invoked;
};

// When the embedder owns the TLS variables (e.g. the application is built
// with special TLS requirements), it must place
// RLBOX_WASM2C_SANDBOX_STATIC_VARIABLES() in exactly one translation unit,
// which defines the thread-local data and its accessor below.
#ifdef RLBOX_EMBEDDER_PROVIDES_TLS_STATIC_VARIABLES

rlbox_wasm2c_sandbox_thread_data* get_rlbox_wasm2c_sandbox_thread_data();

# define RLBOX_WASM2C_SANDBOX_STATIC_VARIABLES() \
  thread_local rlbox::rlbox_wasm2c_sandbox_thread_data \
    rlbox_wasm2c_sandbox_thread_info{ 0, 0 }; \
  \
  namespace rlbox { \
    rlbox_wasm2c_sandbox_thread_data* get_rlbox_wasm2c_sandbox_thread_data() \
    { \
      return &rlbox_wasm2c_sandbox_thread_info; \
    } \
  } \
  static_assert(true, "Enforce semi-colon")

#endif

} // namespace rlbox
diff --git a/third_party/rlbox_wasm2c_sandbox/include/wasm2c_rt_mem.h b/third_party/rlbox_wasm2c_sandbox/include/wasm2c_rt_mem.h
new file mode 100644
index 0000000000..e61b9fa3eb
--- /dev/null
+++ b/third_party/rlbox_wasm2c_sandbox/include/wasm2c_rt_mem.h
@@ -0,0 +1,46 @@
#ifndef WASM_RT_OS_H_
#define WASM_RT_OS_H_
// NOTE(review): the guard name says WASM_RT_OS_H_ while the file is
// wasm2c_rt_mem.h -- presumably a leftover from a rename; confirm no other
// header reuses this guard before changing it.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <time.h>

#include "wasm-rt.h"

#ifdef __cplusplus
extern "C"
{
#endif

  // Embedder-owned environment handed to each wasm2c instance: the sandbox
  // heap and the indirect-call (funcref) table.
  typedef struct w2c_env
  {
    wasm_rt_memory_t* sandbox_memory_info;
    wasm_rt_funcref_table_t* sandbox_callback_table;
  } w2c_env;

  // Accessors called by wasm2c-generated code to locate the instance's
  // memory and indirect function table.
  wasm_rt_memory_t* w2c_env_memory(struct w2c_env* instance);
  wasm_rt_funcref_table_t* w2c_env_0x5F_indirect_function_table(
    struct w2c_env*);

  // Result of validating a requested heap capacity (in wasm pages and bytes).
  // is_valid is false when the request cannot be satisfied.
  typedef struct w2c_mem_capacity
  {
    bool is_valid;
    bool is_mem_32;
    uint64_t max_pages;
    uint64_t max_size;
  } w2c_mem_capacity;

  w2c_mem_capacity get_valid_wasm2c_memory_capacity(uint64_t min_capacity,
                                                    bool is_mem_32);

  // Reserve/commit a sandbox heap (optionally capped by custom_capacity) and
  // release it again. On failure the returned memory's data field is NULL.
  wasm_rt_memory_t create_wasm2c_memory(
    uint32_t initial_pages,
    const w2c_mem_capacity* custom_capacity);
  void destroy_wasm2c_memory(wasm_rt_memory_t* memory);

#ifdef __cplusplus
}
#endif

#endif
diff --git a/third_party/rlbox_wasm2c_sandbox/include/wasm2c_rt_minwasi.h b/third_party/rlbox_wasm2c_sandbox/include/wasm2c_rt_minwasi.h
new file mode 100644
index 0000000000..51542b5d1e
--- /dev/null
+++ b/third_party/rlbox_wasm2c_sandbox/include/wasm2c_rt_minwasi.h
@@ -0,0 +1,38 @@
#ifndef WASM_RT_MINWASI_H_
#define WASM_RT_MINWASI_H_

/* A minimum wasi implementation supporting only stdin, stdout, stderr, argv
 * (up to 100 args) and clock functions. */

#include <stdbool.h>
#include <stdint.h>

#include "wasm-rt.h"

#ifdef __cplusplus
extern "C"
{
#endif

  // Per-instance WASI state. The struct name mirrors the wasm import module
  // name ("wasi_snapshot_preview1") as mangled by wasm2c.
  typedef struct w2c_wasi__snapshot__preview1
  {
    // heap of the instance this WASI state serves
    wasm_rt_memory_t* instance_memory;

    // command-line arguments exposed to the sandboxed program
    uint32_t main_argc;
    const char** main_argv;

    // environment variables exposed to the sandboxed program
    uint32_t env_count;
    const char** env;

    // opaque per-instance clock state (allocated by minwasi_init_instance)
    void* clock_data;
  } w2c_wasi__snapshot__preview1;

  // Process-wide setup (call once), per-instance setup, and the matching
  // per-instance teardown. The init functions return false on failure.
  bool minwasi_init();
  bool minwasi_init_instance(w2c_wasi__snapshot__preview1* wasi_data);
  void minwasi_cleanup_instance(w2c_wasi__snapshot__preview1* wasi_data);

#ifdef __cplusplus
}
#endif

#endif
diff --git a/third_party/rlbox_wasm2c_sandbox/src/wasm2c_rt_mem.c b/third_party/rlbox_wasm2c_sandbox/src/wasm2c_rt_mem.c
new file mode 100644
index 0000000000..1bdf6f715c
--- /dev/null
+++ b/third_party/rlbox_wasm2c_sandbox/src/wasm2c_rt_mem.c
@@ -0,0 +1,454 @@
+#include "wasm2c_rt_mem.h"
+#include "wasm-rt.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
/* Memory protection flags (OS-agnostic; translated to mmap/VirtualAlloc
 * protections by the platform-specific code at the bottom of this file). */
enum
{
  MMAP_PROT_NONE = 0,
  MMAP_PROT_READ = 1,
  MMAP_PROT_WRITE = 2,
  MMAP_PROT_EXEC = 4
};

/* Memory map flags */
enum
{
  MMAP_MAP_NONE = 0,
  /* Put the mapping into 0 to 2 G, supported only on x86_64 */
  MMAP_MAP_32BIT = 1,
  /* Don't interpret addr as a hint: place the mapping at exactly
     that address. */
  MMAP_MAP_FIXED = 2
};
+
// Platform-agnostic front-ends for the OS virtual-memory APIs; implemented
// per-OS (_WIN32 vs POSIX) at the bottom of this file.

// Try reserving an aligned memory space.
// Returns pointer to allocated space on success, 0 on failure.
static void* os_mmap_aligned(void* addr,
                             size_t requested_length,
                             int prot,
                             int flags,
                             size_t alignment,
                             size_t alignment_offset);
// Unreserve the memory space
static void os_munmap(void* addr, size_t size);
// Allocates and sets the permissions on the previously reserved memory space
// Returns 0 on success, non zero on failure.
static int os_mmap_commit(void* curr_heap_end_pointer,
                          size_t expanded_size,
                          int prot);
+
+wasm_rt_memory_t* w2c_env_memory(struct w2c_env* instance)
+{
+ return instance->sandbox_memory_info;
+}
+
+wasm_rt_funcref_table_t* w2c_env_0x5F_indirect_function_table(
+ struct w2c_env* instance)
+{
+ return instance->sandbox_callback_table;
+}
+
#define WASM_PAGE_SIZE 65536
// NOTE(review): RLBOX_FOUR_GIG appears unused in this file (the constants
// below repeat the 0x100000000ull literal) -- confirm before removing.
#define RLBOX_FOUR_GIG 0x100000000ull

#if UINTPTR_MAX == 0xffffffffffffffff
// Guard page of 4GiB
# define WASM_HEAP_GUARD_PAGE_SIZE 0x100000000ull
// Heap aligned to 4GB
# define WASM_HEAP_ALIGNMENT 0x100000000ull
// By default max heap is 4GB
# define WASM_HEAP_DEFAULT_MAX_PAGES 65536
#elif UINTPTR_MAX == 0xffffffff
// No guard pages
# define WASM_HEAP_GUARD_PAGE_SIZE 0
// Unaligned heap
# define WASM_HEAP_ALIGNMENT 0
// Default max heap is 16MB
# define WASM_HEAP_DEFAULT_MAX_PAGES 256
#else
# error "Unknown pointer size"
#endif
+
+static uint64_t compute_heap_reserve_space(uint32_t chosen_max_pages)
+{
+ const uint64_t heap_reserve_size =
+ ((uint64_t)chosen_max_pages) * WASM_PAGE_SIZE + WASM_HEAP_GUARD_PAGE_SIZE;
+ return heap_reserve_size;
+}
+
+w2c_mem_capacity get_valid_wasm2c_memory_capacity(uint64_t min_capacity,
+ bool is_mem_32)
+{
+ const w2c_mem_capacity err_val = { false /* is_valid */,
+ false /* is_mem_32 */,
+ 0 /* max_pages */,
+ 0 /* max_size */ };
+
+ // We do not handle memory 64
+ if (!is_mem_32) {
+ return err_val;
+ }
+
+ const uint64_t default_capacity =
+ ((uint64_t)WASM_HEAP_DEFAULT_MAX_PAGES) * WASM_PAGE_SIZE;
+
+ if (min_capacity <= default_capacity) {
+ // Handle 0 case and small values
+ const w2c_mem_capacity ret = { true /* is_valid */,
+ true /* is_mem_32 */,
+ WASM_HEAP_DEFAULT_MAX_PAGES /* max_pages */,
+ default_capacity /* max_size */ };
+ return ret;
+ } else if (min_capacity > UINT32_MAX) {
+ // Handle out of range values
+ return err_val;
+ }
+
+ const uint64_t page_size_minus_1 = WASM_PAGE_SIZE - 1;
+ // Get number of pages greater than min_capacity
+ const uint64_t capacity_pages = ((min_capacity - 1) / page_size_minus_1) + 1;
+
+ const w2c_mem_capacity ret = { true /* is_valid */,
+ true /* is_mem_32 */,
+ capacity_pages /* max_pages */,
+ capacity_pages *
+ WASM_PAGE_SIZE /* max_size */ };
+ return ret;
+}
+
// Reserve the full address range for a sandbox heap (max heap + guard
// region) with no access, then commit only the initial pages read/write.
// custom_capacity, when non-null, must come from
// get_valid_wasm2c_memory_capacity. On failure the returned struct has
// data == 0.
wasm_rt_memory_t create_wasm2c_memory(uint32_t initial_pages,
                                      const w2c_mem_capacity* custom_capacity)
{

  if (custom_capacity && !custom_capacity->is_valid) {
    wasm_rt_memory_t ret = { 0 };
    return ret;
  }

  // NOTE(review): this product is computed in 32 bits and would wrap to 0
  // for initial_pages == 65536 (a full 4GiB initial memory) -- confirm
  // callers never request that.
  const uint32_t byte_length = initial_pages * WASM_PAGE_SIZE;
  const uint64_t chosen_max_pages =
    custom_capacity ? custom_capacity->max_pages : WASM_HEAP_DEFAULT_MAX_PAGES;
  const uint64_t heap_reserve_size =
    compute_heap_reserve_space(chosen_max_pages);

  uint8_t* data = 0;
  const uint64_t retries = 10;
  for (uint64_t i = 0; i < retries; i++) {
    data = (uint8_t*)os_mmap_aligned(0,
                                     heap_reserve_size,
                                     MMAP_PROT_NONE,
                                     MMAP_MAP_NONE,
                                     WASM_HEAP_ALIGNMENT,
                                     0 /* alignment_offset */);
    if (data) {
      int ret =
        os_mmap_commit(data, byte_length, MMAP_PROT_READ | MMAP_PROT_WRITE);
      if (ret != 0) {
        // failed to set permissions
        os_munmap(data, heap_reserve_size);
        data = 0;
      }
      // Retries apply only to the reservation step; a commit failure exits
      // the loop with data == 0 (reported to the caller as failure).
      break;
    }
  }

  wasm_rt_memory_t ret;
  ret.data = data;
  ret.max_pages = chosen_max_pages;
  ret.pages = initial_pages;
  ret.size = byte_length;
  return ret;
}
+
+void destroy_wasm2c_memory(wasm_rt_memory_t* memory)
+{
+ if (memory->data != 0) {
+ const uint64_t heap_reserve_size =
+ compute_heap_reserve_space(memory->max_pages);
+ os_munmap(memory->data, heap_reserve_size);
+ memory->data = 0;
+ }
+}
+
+#undef WASM_HEAP_DEFAULT_MAX_PAGES
+#undef WASM_HEAP_ALIGNMENT
+#undef WASM_HEAP_GUARD_PAGE_SIZE
+#undef RLBOX_FOUR_GIG
+#undef WASM_PAGE_SIZE
+
+// Based on
+// https://web.archive.org/web/20191012035921/http://nadeausoftware.com/articles/2012/01/c_c_tip_how_use_compiler_predefined_macros_detect_operating_system#BSD
+// Check for windows (non cygwin) environment
+#if defined(_WIN32)
+
+# include <windows.h>
+
+static size_t os_getpagesize()
+{
+ SYSTEM_INFO S;
+ GetNativeSystemInfo(&S);
+ return S.dwPageSize;
+}
+
// VirtualAlloc-based mmap shim. alloc_flag selects MEM_RESERVE (reserve
// address space) vs MEM_COMMIT (back reserved pages with usable memory).
// Sizes are rounded up to the native page size; returns NULL on failure.
// `flags` is accepted for signature parity with the POSIX path but unused.
static void* win_mmap(void* hint,
                      size_t size,
                      int prot,
                      int flags,
                      DWORD alloc_flag)
{
  DWORD flProtect = PAGE_NOACCESS;
  size_t request_size, page_size;
  void* addr;

  page_size = os_getpagesize();
  request_size = (size + page_size - 1) & ~(page_size - 1);

  if (request_size < size)
    /* integer overflow */
    return NULL;

  if (request_size == 0)
    request_size = page_size;

  // Map the portable MMAP_PROT_* bits onto the closest Windows protection.
  if (prot & MMAP_PROT_EXEC) {
    if (prot & MMAP_PROT_WRITE)
      flProtect = PAGE_EXECUTE_READWRITE;
    else
      flProtect = PAGE_EXECUTE_READ;
  } else if (prot & MMAP_PROT_WRITE)
    flProtect = PAGE_READWRITE;
  else if (prot & MMAP_PROT_READ)
    flProtect = PAGE_READONLY;

  addr = VirtualAlloc((LPVOID)hint, request_size, alloc_flag, flProtect);
  return addr;
}
+
// Reserve requested_length bytes at an address satisfying
// (addr + alignment_offset) % alignment == 0. Windows cannot partially
// release a reservation, so on a misaligned hit the whole padded region is
// released and re-reserved at the computed aligned address.
static void* os_mmap_aligned(void* addr,
                             size_t requested_length,
                             int prot,
                             int flags,
                             size_t alignment,
                             size_t alignment_offset)
{
  size_t padded_length = requested_length + alignment + alignment_offset;
  uintptr_t unaligned =
    (uintptr_t)win_mmap(addr, padded_length, prot, flags, MEM_RESERVE);

  if (!unaligned) {
    return (void*)unaligned;
  }

  // Round up the next address that has addr % alignment = 0
  const size_t alignment_corrected = alignment == 0 ? 1 : alignment;
  uintptr_t aligned_nonoffset =
    (unaligned + (alignment_corrected - 1)) & ~(alignment_corrected - 1);

  // Currently offset 0 is aligned according to alignment
  // Alignment needs to be enforced at the given offset
  uintptr_t aligned = 0;
  if ((aligned_nonoffset - alignment_offset) >= unaligned) {
    aligned = aligned_nonoffset - alignment_offset;
  } else {
    aligned = aligned_nonoffset - alignment_offset + alignment;
  }

  // Lucky case: the first reservation is already where we need it.
  if (aligned == unaligned && padded_length == requested_length) {
    return (void*)aligned;
  }

  // Sanity check
  if (aligned < unaligned ||
      (aligned + (requested_length - 1)) > (unaligned + (padded_length - 1)) ||
      (aligned + alignment_offset) % alignment_corrected != 0) {
    os_munmap((void*)unaligned, padded_length);
    return NULL;
  }

  // windows does not support partial unmapping, so unmap and remap
  // NOTE(review): between the unmap and the remap another thread could take
  // this address range, making the remap return NULL -- the caller
  // (create_wasm2c_memory) retries in a loop.
  os_munmap((void*)unaligned, padded_length);
  aligned = (uintptr_t)win_mmap(
    (void*)aligned, requested_length, prot, flags, MEM_RESERVE);
  return (void*)aligned;
}
+
// Release a reservation created by win_mmap. VirtualFree with MEM_RELEASE
// requires size 0 and frees the entire original reservation; `size` is only
// used for the diagnostic message on failure.
static void os_munmap(void* addr, size_t size)
{
  DWORD alloc_flag = MEM_RELEASE;
  if (addr) {
    if (VirtualFree(addr, 0, alloc_flag) == 0) {
      size_t page_size = os_getpagesize();
      size_t request_size = (size + page_size - 1) & ~(page_size - 1);
      // NOTE(review): errno is not set by VirtualFree (GetLastError() would
      // be) -- the value printed here may be stale.
      int64_t curr_err = errno;
      printf("os_munmap error addr:%p, size:0x%zx, errno:%" PRId64 "\n",
             addr,
             request_size,
             curr_err);
    }
  }
}
+
+static int os_mmap_commit(void* curr_heap_end_pointer,
+ size_t expanded_size,
+ int prot)
+{
+ uintptr_t addr = (uintptr_t)win_mmap(
+ curr_heap_end_pointer, expanded_size, prot, MMAP_MAP_NONE, MEM_COMMIT);
+ int ret = addr ? 0 : -1;
+ return ret;
+}
+
+#elif !defined(_WIN32) && (defined(__unix__) || defined(__unix) || \
+ (defined(__APPLE__) && defined(__MACH__)))
+
+# include <sys/mman.h>
+# include <unistd.h>
+
// Query the native page size from the OS (POSIX).
static size_t os_getpagesize()
{
  const int page_size = getpagesize();
  return (size_t)page_size;
}
+
+static void* os_mmap(void* hint, size_t size, int prot, int flags)
+{
+ int map_prot = PROT_NONE;
+ int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
+ uint64_t request_size, page_size;
+ void* addr;
+
+ page_size = (uint64_t)os_getpagesize();
+ request_size = (size + page_size - 1) & ~(page_size - 1);
+
+ if ((size_t)request_size < size)
+ /* integer overflow */
+ return NULL;
+
+ if (request_size > 16 * (uint64_t)UINT32_MAX)
+ /* At most 16 G is allowed */
+ return NULL;
+
+ if (prot & MMAP_PROT_READ)
+ map_prot |= PROT_READ;
+
+ if (prot & MMAP_PROT_WRITE)
+ map_prot |= PROT_WRITE;
+
+ if (prot & MMAP_PROT_EXEC)
+ map_prot |= PROT_EXEC;
+
+# if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
+# ifndef __APPLE__
+ if (flags & MMAP_MAP_32BIT)
+ map_flags |= MAP_32BIT;
+# endif
+# endif
+
+ if (flags & MMAP_MAP_FIXED)
+ map_flags |= MAP_FIXED;
+
+ addr = mmap(hint, request_size, map_prot, map_flags, -1, 0);
+
+ if (addr == MAP_FAILED)
+ return NULL;
+
+ return addr;
+}
+
// Reserve requested_length bytes such that
// (result + alignment_offset) % alignment == 0, by over-reserving
// alignment + alignment_offset extra bytes and trimming the surplus head
// and tail with partial munmap (supported on POSIX). Returns NULL on
// failure.
static void* os_mmap_aligned(void* addr,
                             size_t requested_length,
                             int prot,
                             int flags,
                             size_t alignment,
                             size_t alignment_offset)
{
  size_t padded_length = requested_length + alignment + alignment_offset;
  uintptr_t unaligned = (uintptr_t)os_mmap(addr, padded_length, prot, flags);

  if (!unaligned) {
    return (void*)unaligned;
  }

  // Round up the next address that has addr % alignment = 0
  const size_t alignment_corrected = alignment == 0 ? 1 : alignment;
  uintptr_t aligned_nonoffset =
    (unaligned + (alignment_corrected - 1)) & ~(alignment_corrected - 1);

  // Currently offset 0 is aligned according to alignment
  // Alignment needs to be enforced at the given offset
  uintptr_t aligned = 0;
  if ((aligned_nonoffset - alignment_offset) >= unaligned) {
    aligned = aligned_nonoffset - alignment_offset;
  } else {
    aligned = aligned_nonoffset - alignment_offset + alignment;
  }

  // Sanity check
  if (aligned < unaligned ||
      (aligned + (requested_length - 1)) > (unaligned + (padded_length - 1)) ||
      (aligned + alignment_offset) % alignment_corrected != 0) {
    os_munmap((void*)unaligned, padded_length);
    return NULL;
  }

  // Trim the unused region before the aligned start.
  {
    size_t unused_front = aligned - unaligned;
    if (unused_front != 0) {
      os_munmap((void*)unaligned, unused_front);
    }
  }

  // Trim the unused region after the aligned end.
  {
    size_t unused_back =
      (unaligned + (padded_length - 1)) - (aligned + (requested_length - 1));
    if (unused_back != 0) {
      os_munmap((void*)(aligned + requested_length), unused_back);
    }
  }

  return (void*)aligned;
}
+
// Unmap a region previously mapped via os_mmap, mirroring the page rounding
// done at map time. Logs (but otherwise ignores) munmap failures.
static void os_munmap(void* addr, size_t size)
{
  if (!addr) {
    return;
  }

  const uint64_t page_size = (uint64_t)os_getpagesize();
  const uint64_t request_size = (size + page_size - 1) & ~(page_size - 1);

  if (munmap(addr, request_size) != 0) {
    printf("os_munmap error addr:%p, size:0x%" PRIx64 ", errno:%d\n",
           addr,
           request_size,
           errno);
  }
}
+
+static int os_mmap_commit(void* addr, size_t size, int prot)
+{
+ int map_prot = PROT_NONE;
+ uint64_t page_size = (uint64_t)os_getpagesize();
+ uint64_t request_size = (size + page_size - 1) & ~(page_size - 1);
+
+ if (!addr)
+ return 0;
+
+ if (prot & MMAP_PROT_READ)
+ map_prot |= PROT_READ;
+
+ if (prot & MMAP_PROT_WRITE)
+ map_prot |= PROT_WRITE;
+
+ if (prot & MMAP_PROT_EXEC)
+ map_prot |= PROT_EXEC;
+
+ return mprotect(addr, request_size, map_prot);
+}
+
+#else
+# error "Unknown OS"
+#endif
diff --git a/third_party/rlbox_wasm2c_sandbox/src/wasm2c_rt_minwasi.c b/third_party/rlbox_wasm2c_sandbox/src/wasm2c_rt_minwasi.c
new file mode 100644
index 0000000000..9289cd75b1
--- /dev/null
+++ b/third_party/rlbox_wasm2c_sandbox/src/wasm2c_rt_minwasi.c
@@ -0,0 +1,799 @@
+/* A minimum wasi implementation supporting only stdin, stdout, stderr, argv
+ * (upto 1000 args), env (upto 1000 env), and clock functions. */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#ifdef _WIN32
+# include <windows.h>
+#endif
+
+#if defined(__APPLE__) && defined(__MACH__)
+// Macs priors to OSX 10.12 don't have the clock functions. So we will use mac
+// specific options
+# include <mach/mach_time.h>
+# include <sys/time.h>
+#endif
+
+#include "wasm-rt.h"
+#include "wasm2c_rt_minwasi.h"
+
// Fixed-width shorthand types used by wasm2c-generated code; guarded so a
// wasm-rt header that already defines them takes precedence.
#ifndef WASM_RT_CORE_TYPES_DEFINED
# define WASM_RT_CORE_TYPES_DEFINED
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
typedef int16_t s16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
typedef int64_t s64;
typedef float f32;
typedef double f64;
#endif

// Branch-prediction hints; plain pass-throughs off GCC/Clang.
#ifndef UNLIKELY
# if defined(__GNUC__)
#  define UNLIKELY(x) __builtin_expect(!!(x), 0)
#  define LIKELY(x) __builtin_expect(!!(x), 1)
# else
#  define UNLIKELY(x) (x)
#  define LIKELY(x) (x)
# endif
#endif

// Abort guest execution with the given wasm trap code.
#define TRAP(x) wasm_rt_trap(WASM_RT_TRAP_##x)

// Translate a guest offset into a host pointer. Performs no bounds check on
// its own -- always pair with one of the WASI_MEMCHECK* macros below.
#define WASI_MEMACCESS(mem, a) ((void*)&(mem->data[a]))

// Trap (OOB) unless [a, a+sz) lies inside the sandbox memory. The sum is
// evaluated in 64 bits so it cannot wrap for 32-bit guest offsets.
#define WASI_MEMCHECK_SIZE(mem, a, sz) \
  if (UNLIKELY(((u64)(a)) + sz > mem->size)) \
  TRAP(OOB)

// Bounds-checked memcpy from host `src` into sandbox memory at offset `a`.
#define WASI_CHECK_COPY(mem, a, sz, src) \
  do { \
    WASI_MEMCHECK_SIZE(mem, a, sz); \
    memcpy(WASI_MEMACCESS(mem, a), src, sz); \
  } while (0)

#define WASI_MEMCHECK(mem, a, t) WASI_MEMCHECK_SIZE(mem, a, sizeof(t))

// Bounds-checked load of a t1 from sandbox memory, converted via t2 to t3.
#define DEFINE_WASI_LOAD(name, t1, t2, t3) \
  static inline t3 name(wasm_rt_memory_t* mem, u64 addr) \
  { \
    WASI_MEMCHECK(mem, addr, t1); \
    t1 result; \
    memcpy(&result, WASI_MEMACCESS(mem, addr), sizeof(t1)); \
    return (t3)(t2)result; \
  }

// Bounds-checked store of a t2 value into sandbox memory as a t1.
#define DEFINE_WASI_STORE(name, t1, t2) \
  static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) \
  { \
    WASI_MEMCHECK(mem, addr, t1); \
    t1 wrapped = (t1)value; \
    memcpy(WASI_MEMACCESS(mem, addr), &wrapped, sizeof(t1)); \
  }

DEFINE_WASI_LOAD(wasm_i32_load, u32, u32, u32);
DEFINE_WASI_STORE(wasm_i32_store, u32, u32);
DEFINE_WASI_STORE(wasm_i64_store, u64, u64);
+
+static bool safe_add_u32(u32* ret, u32 a, u32 b)
+{
+ if (UINT32_MAX - a < b) {
+ *ret = 0;
+ return false;
+ }
+ *ret = a + b;
+ return true;
+}
+
+// clang-format off
+
+////////////// Supported WASI APIs
+//
+// Clock operations
+// ----------------
+// errno_t clock_res_get(void* ctx, clockid_t clock_id, timestamp_t* resolution);
+// errno_t clock_time_get(void* ctx, clockid_t clock_id, timestamp_t precision, timestamp_t* time);
+//
+// File operations
+// ----------------
+// Only the default descriptors of STDIN, STDOUT, STDERR are allowed by the
+// runtime.
+//
+// errno_t fd_prestat_get(void* ctx, fd_t fd, prestat_t* buf);
+// errno_t fd_read(void* ctx, fd_t fd, const iovec_t* iovs, size_t iovs_len, size_t* nread);
+// errno_t fd_write(void* ctx, fd_t fd, const ciovec_t* iovs, size_t iovs_len, size_t* nwritten);
+
+////////////// Partially supported WASI APIs
+//
+// App environment operations
+// --------------------------
+// These APIs work but return an empty buffer
+//
+// errno_t args_get(void* ctx, char** argv, char* argv_buf);
+// errno_t args_sizes_get(void* ctx, size_t* argc, size_t* argv_buf_size);
+// errno_t environ_get(void* ctx, char** environment, char* environ_buf);
+// errno_t environ_sizes_get(void* ctx, size_t* environ_count, size_t* environ_buf_size);
+//
+// Proc exit operation
+// -------------------
+// This is a no-op here in this runtime as the focus is on library
+// sandboxing
+//
+// errno_t proc_exit(void* ctx, exitcode_t rval);
+
+////////////// Unsupported WASI APIs
+// errno_t fd_advise(void* ctx, fd_t fd, filesize_t offset, filesize_t len, advice_t advice);
+// errno_t fd_allocate(void* ctx, fd_t fd, filesize_t offset, filesize_t len);
+// errno_t fd_close(void* ctx, fd_t fd);
+// errno_t fd_datasync(void* ctx, fd_t fd);
+// errno_t fd_fdstat_get(void* ctx, fd_t fd, fdstat_t* buf);
+// errno_t fd_fdstat_set_flags(void* ctx, fd_t fd, fdflags_t flags);
+// errno_t fd_fdstat_set_rights(void* ctx, fd_t fd, rights_t fs_rights_base, rights_t fs_rights_inheriting);
+// errno_t fd_filestat_get(void* ctx, fd_t fd, filestat_t* buf);
+// errno_t fd_filestat_set_size(void* ctx, fd_t fd, filesize_t st_size);
+// errno_t fd_filestat_set_times(void* ctx, fd_t fd, timestamp_t st_atim, timestamp_t st_mtim, fstflags_t fst_flags);
+// errno_t fd_pread(void* ctx, fd_t fd, const iovec_t* iovs, size_t iovs_len, filesize_t offset, size_t* nread);
+// errno_t fd_prestat_dir_name(void* ctx, fd_t fd, char* path, size_t path_len);
+// errno_t fd_pwrite(void* ctx, fd_t fd, const ciovec_t* iovs, size_t iovs_len, filesize_t offset, size_t* nwritten);
+// errno_t fd_readdir(void* ctx, fd_t fd, void* buf, size_t buf_len, dircookie_t cookie, size_t* bufused);
+// errno_t fd_renumber(void* ctx, fd_t from, fd_t to);
+// errno_t fd_seek(void* ctx, fd_t fd, filedelta_t offset, whence_t whence, filesize_t* newoffset);
+// errno_t fd_sync(void* ctx, fd_t fd);
+// errno_t fd_tell(void* ctx, fd_t fd, filesize_t* offset);
+// errno_t path_create_directory(void* ctx, fd_t fd, const char* path, size_t path_len);
+// errno_t path_filestat_get(void* ctx, fd_t fd, lookupflags_t flags, const char* path, size_t path_len, filestat_t* buf);
+// errno_t path_filestat_set_times(void* ctx, fd_t fd, lookupflags_t flags, const char* path, size_t path_len, timestamp_t st_atim, timestamp_t st_mtim, fstflags_t fst_flags);
+// errno_t path_link(void* ctx, fd_t old_fd, lookupflags_t old_flags, const char* old_path, size_t old_path_len, fd_t new_fd, const char* new_path, size_t new_path_len);
+// errno_t path_open(void* ctx, fd_t dirfd, lookupflags_t dirflags, const char* path, size_t path_len, oflags_t o_flags, rights_t fs_rights_base, rights_t fs_rights_inheriting, fdflags_t fs_flags, fd_t* fd);
+// errno_t path_readlink(void* ctx, fd_t fd, const char* path, size_t path_len, char* buf, size_t buf_len, size_t* bufused);
+// errno_t path_remove_directory(void* ctx, fd_t fd, const char* path, size_t path_len);
+// errno_t path_rename(void* ctx, fd_t old_fd, const char* old_path, size_t old_path_len, fd_t new_fd, const char* new_path, size_t new_path_len);
+// errno_t path_symlink(void* ctx, const char* old_path, size_t old_path_len, fd_t fd, const char* new_path, size_t new_path_len);
+// errno_t path_unlink_file(void* ctx, fd_t fd, const char* path, size_t path_len);
+// errno_t poll_oneoff(void* ctx, const subscription_t* in, event_t* out, size_t nsubscriptions, size_t* nevents);
+// errno_t proc_raise(void* ctx, signal_t sig);
+// errno_t random_get(void* ctx, void* buf, size_t buf_len);
// errno_t sched_yield(void* ctx);
// errno_t sock_accept(void* ctx, fd_t sock, fdflags_t flags, fd_t* fd);
+// errno_t sock_recv(void* ctx, fd_t sock, const iovec_t* ri_data, size_t ri_data_len, riflags_t ri_flags, size_t* ro_datalen, roflags_t* ro_flags);
+// errno_t sock_send(void* ctx, fd_t sock, const ciovec_t* si_data, size_t si_data_len, siflags_t si_flags, size_t* so_datalen);
+// errno_t sock_shutdown(void* ctx, fd_t sock, sdflags_t how);
+
+// clang-format on
+
// WASI snapshot-preview1 errno values returned to the guest.
// Success
#define WASI_SUCCESS 0
// Bad file descriptor.
#define WASI_BADF_ERROR 8
// Invalid argument
#define WASI_INVAL_ERROR 28
// Operation not permitted.
#define WASI_PERM_ERROR 63
// Syscall not implemented
// NOTE(review): the WASI preview1 errno table lists "nosys" as 52 (53 is
// "notconn") -- verify this value against the spec before relying on it.
#define WASI_NOSYS_ERROR 53

// Evaluate exp; on failure return EINVAL to the wasm caller.
#define WASI_RET_ERR_ON_FAIL(exp) \
  if (!(exp)) { \
    return WASI_INVAL_ERROR; \
  }
+
+/////////////////////////////////////////////////////////////
+// Clock operations
+/////////////////////////////////////////////////////////////
+
+#if defined(_WIN32)
+
+typedef struct
+{
+ LARGE_INTEGER counts_per_sec; /* conversion factor */
+} wasi_win_clock_info_t;
+
+static wasi_win_clock_info_t g_wasi_win_clock_info;
+static int g_os_data_initialized = 0;
+
// One-time process-wide query of the performance-counter frequency used to
// convert counter ticks to seconds/nanoseconds.
static bool os_clock_init()
{
  // From here:
  // https://stackoverflow.com/questions/5404277/porting-clock-gettime-to-windows/38212960#38212960
  if (QueryPerformanceFrequency(&g_wasi_win_clock_info.counts_per_sec) == 0) {
    return false;
  }
  g_os_data_initialized = 1;
  return true;
}
+
// Give each sandbox instance its own heap copy of the global clock info so
// instances can be created and torn down independently.
static bool os_clock_init_instance(void** clock_data_pointer)
{
  if (!g_os_data_initialized) {
    // NOTE(review): a failure of os_clock_init is ignored here; the copied
    // global would then be zero-initialized and os_clock_gettime will report
    // -1 (counts_per_sec <= 0) -- confirm this fallback is intended.
    os_clock_init();
  }

  wasi_win_clock_info_t* alloc =
    (wasi_win_clock_info_t*)malloc(sizeof(wasi_win_clock_info_t));
  if (!alloc) {
    return false;
  }
  memcpy(alloc, &g_wasi_win_clock_info, sizeof(wasi_win_clock_info_t));
  *clock_data_pointer = alloc;
  return true;
}
+
// Release the per-instance clock info allocated by os_clock_init_instance.
// Fix: the guard was inverted (`== 0`), so free() only ever ran on a null
// pointer and the allocation leaked on every instance teardown. Free exactly
// when the pointer is non-null, then null it out so the call is idempotent.
static void os_clock_cleanup_instance(void** clock_data_pointer)
{
  if (*clock_data_pointer != 0) {
    free(*clock_data_pointer);
    *clock_data_pointer = 0;
  }
}
+
// Convert QueryPerformanceCounter ticks into a timespec using the cached
// counts_per_sec frequency. clock_id is ignored: every WASI clock maps to
// the same performance counter here. Returns 0 on success, -1 on failure.
static int os_clock_gettime(void* clock_data,
                            int clock_id,
                            struct timespec* out_struct)
{
  wasi_win_clock_info_t* alloc = (wasi_win_clock_info_t*)clock_data;

  LARGE_INTEGER count;
  (void)clock_id;

  // counts_per_sec <= 0 means clock init failed or never ran.
  if (alloc->counts_per_sec.QuadPart <= 0 ||
      QueryPerformanceCounter(&count) == 0) {
    return -1;
  }

# define BILLION 1000000000LL
  out_struct->tv_sec = count.QuadPart / alloc->counts_per_sec.QuadPart;
  out_struct->tv_nsec =
    ((count.QuadPart % alloc->counts_per_sec.QuadPart) * BILLION) /
    alloc->counts_per_sec.QuadPart;
# undef BILLION

  return 0;
}
+
// Report a fixed nominal resolution of 1 microsecond for all clocks;
// clock_data and clock_id are not consulted.
static int os_clock_getres(void* clock_data,
                           int clock_id,
                           struct timespec* out_struct)
{
  (void)clock_id;
  struct timespec res;
  res.tv_sec = 0;
  res.tv_nsec = 1000;
  *out_struct = res;
  return 0;
}
+
+#elif defined(__APPLE__) && defined(__MACH__)
+
+typedef struct
+{
+ mach_timebase_info_data_t timebase; /* numer = 0, denom = 0 */
+ struct timespec inittime; /* nanoseconds since 1-Jan-1970 to init() */
+ uint64_t initclock; /* ticks since boot to init() */
+} wasi_mac_clock_info_t;
+
+static wasi_mac_clock_info_t g_wasi_mac_clock_info;
+static int g_os_data_initialized = 0;
+
// Cache the mach timebase plus a wall-clock anchor so later reads can
// synthesize realtime values from mach_absolute_time deltas (pre-10.12
// macOS lacks clock_gettime).
static bool os_clock_init()
{
  // From here:
  // https://stackoverflow.com/questions/5167269/clock-gettime-alternative-in-mac-os-x/21352348#21352348
  if (mach_timebase_info(&g_wasi_mac_clock_info.timebase) != 0) {
    return false;
  }

  // microseconds since 1 Jan 1970
  struct timeval micro;
  if (gettimeofday(&micro, NULL) != 0) {
    return false;
  }

  g_wasi_mac_clock_info.initclock = mach_absolute_time();

  g_wasi_mac_clock_info.inittime.tv_sec = micro.tv_sec;
  g_wasi_mac_clock_info.inittime.tv_nsec = micro.tv_usec * 1000;

  g_os_data_initialized = 1;
  return true;
}
+
// Give each sandbox instance its own heap copy of the global clock info so
// instances can be created and torn down independently.
static bool os_clock_init_instance(void** clock_data_pointer)
{
  if (!g_os_data_initialized) {
    // NOTE(review): a failure of os_clock_init is ignored here; the copied
    // global would then hold zeroed timebase/anchor values -- confirm this
    // fallback is intended.
    os_clock_init();
  }

  wasi_mac_clock_info_t* alloc =
    (wasi_mac_clock_info_t*)malloc(sizeof(wasi_mac_clock_info_t));
  if (!alloc) {
    return false;
  }
  memcpy(alloc, &g_wasi_mac_clock_info, sizeof(wasi_mac_clock_info_t));
  *clock_data_pointer = alloc;
  return true;
}
+
// Release the per-instance clock info allocated by os_clock_init_instance.
// Fix: the guard was inverted (`== 0`), so free() only ever ran on a null
// pointer and the allocation leaked on every instance teardown. Free exactly
// when the pointer is non-null, then null it out so the call is idempotent.
static void os_clock_cleanup_instance(void** clock_data_pointer)
{
  if (*clock_data_pointer != 0) {
    free(*clock_data_pointer);
    *clock_data_pointer = 0;
  }
}
+
// Time = wall-clock anchor captured at init + (mach ticks since the anchor,
// converted to nanoseconds via the cached timebase). clock_id is ignored:
// every WASI clock maps to the same source here.
static int os_clock_gettime(void* clock_data,
                            int clock_id,
                            struct timespec* out_struct)
{
  int ret = 0;
  wasi_mac_clock_info_t* alloc = (wasi_mac_clock_info_t*)clock_data;

  // From here:
  // https://stackoverflow.com/questions/5167269/clock-gettime-alternative-in-mac-os-x/21352348#21352348

  (void)clock_id;
  // ticks since init
  uint64_t clock = mach_absolute_time() - alloc->initclock;
  // nanoseconds since init
  uint64_t nano = clock * (uint64_t)(alloc->timebase.numer) /
                  (uint64_t)(alloc->timebase.denom);
  *out_struct = alloc->inittime;

# define BILLION 1000000000L
  out_struct->tv_sec += nano / BILLION;
  out_struct->tv_nsec += nano % BILLION;
  // normalize
  out_struct->tv_sec += out_struct->tv_nsec / BILLION;
  out_struct->tv_nsec = out_struct->tv_nsec % BILLION;
# undef BILLION
  return ret;
}
+
// Report a fixed nominal resolution of 1 nanosecond (mach_absolute_time
// granularity); clock_data and clock_id are not consulted.
static int os_clock_getres(void* clock_data,
                           int clock_id,
                           struct timespec* out_struct)
{
  (void)clock_id;
  struct timespec res;
  res.tv_sec = 0;
  res.tv_nsec = 1;
  *out_struct = res;
  return 0;
}
+
+#else
+
// No process-wide clock setup is needed on platforms with native
// clock_gettime support.
static bool os_clock_init()
{
  return true;
}
+
// No per-instance clock state is required on this platform; the pointer is
// left untouched.
static bool os_clock_init_instance(void** clock_data_pointer)
{
  (void)clock_data_pointer;
  return true;
}
+
// Nothing was allocated by os_clock_init_instance on this platform, so
// there is nothing to release.
static void os_clock_cleanup_instance(void** clock_data_pointer)
{
  (void)clock_data_pointer;
}
+
// Forward directly to POSIX clock_gettime; clock_data is unused on this
// platform.
static int os_clock_gettime(void* clock_data,
                            int clock_id,
                            struct timespec* out_struct)
{
  (void)clock_data;
  return clock_gettime(clock_id, out_struct);
}
+
// Forward directly to POSIX clock_getres; clock_data is unused on this
// platform.
static int os_clock_getres(void* clock_data,
                           int clock_id,
                           struct timespec* out_struct)
{
  (void)clock_data;
  return clock_getres(clock_id, out_struct);
}
+
+#endif
+
+#define WASM_CLOCK_REALTIME 0
+#define WASM_CLOCK_MONOTONIC 1
+#define WASM_CLOCK_PROCESS_CPUTIME 2
+#define WASM_CLOCK_THREAD_CPUTIME_ID 3
+
+static int check_clock(u32 clock_id)
+{
+ return clock_id == WASM_CLOCK_REALTIME || clock_id == WASM_CLOCK_MONOTONIC ||
+ clock_id == WASM_CLOCK_PROCESS_CPUTIME ||
+ clock_id == WASM_CLOCK_THREAD_CPUTIME_ID;
+}
+
// WASI clock_time_get: store a u64 timestamp in nanoseconds at guest
// address `out`. `precision` is accepted but ignored.
// https://github.com/WebAssembly/WASI/blob/main/phases/snapshot/docs.md#-timestamp-u64
u32 w2c_wasi__snapshot__preview1_clock_time_get(
  w2c_wasi__snapshot__preview1* wasi_data,
  u32 clock_id,
  u64 precision,
  u32 out)
{
  if (!check_clock(clock_id)) {
    return WASI_INVAL_ERROR;
  }

  struct timespec out_struct;
  int ret = os_clock_gettime(wasi_data->clock_data, clock_id, &out_struct);
  u64 result =
    ((u64)out_struct.tv_sec) * 1000 * 1000 * 1000 + ((u64)out_struct.tv_nsec);
  // The store happens even when the clock call failed, in which case
  // out_struct may be uninitialized.
  wasm_i64_store(wasi_data->instance_memory, out, result);
  // NOTE(review): a -1 from os_clock_gettime is returned verbatim as
  // 0xFFFFFFFF, which is not a valid WASI errno -- verify callers tolerate
  // this.
  return ret;
}
+
// WASI clock_res_get: store the clock resolution as a u64 nanosecond count
// at guest address `out`.
u32 w2c_wasi__snapshot__preview1_clock_res_get(
  w2c_wasi__snapshot__preview1* wasi_data,
  u32 clock_id,
  u32 out)
{
  if (!check_clock(clock_id)) {
    return WASI_INVAL_ERROR;
  }

  struct timespec out_struct;
  int ret = os_clock_getres(wasi_data->clock_data, clock_id, &out_struct);
  u64 result =
    ((u64)out_struct.tv_sec) * 1000 * 1000 * 1000 + ((u64)out_struct.tv_nsec);
  wasm_i64_store(wasi_data->instance_memory, out, result);
  // NOTE(review): like clock_time_get, a failing os_clock_getres is returned
  // verbatim rather than mapped to a WASI errno.
  return ret;
}
+
+/////////////////////////////////////////////////////////////
+////////// File operations
+/////////////////////////////////////////////////////////////
+
+// Only allow stdin (0), stdout (1), stderr(2)
+
+#define WASM_STDIN 0
+#define WASM_STDOUT 1
+#define WASM_STDERR 2
+
+u32 w2c_wasi__snapshot__preview1_fd_prestat_get(
+ w2c_wasi__snapshot__preview1* wasi_data,
+ u32 fd,
+ u32 prestat)
+{
+ if (fd == WASM_STDIN || fd == WASM_STDOUT || fd == WASM_STDERR) {
+ return WASI_PERM_ERROR;
+ }
+ return WASI_BADF_ERROR;
+}
+
// WASI fd_write: gather-write `iovcnt` guest iovecs to stdout or stderr.
// The total byte count written is stored at guest address `pnum`. Only
// WASM_STDOUT/WASM_STDERR are permitted.
u32 w2c_wasi__snapshot__preview1_fd_write(
  w2c_wasi__snapshot__preview1* wasi_data,
  u32 fd,
  u32 iov,
  u32 iovcnt,
  u32 pnum)
{
  if (fd != WASM_STDOUT && fd != WASM_STDERR) {
    return WASI_BADF_ERROR;
  }

  u32 num = 0;
  for (u32 i = 0; i < iovcnt; i++) {
    // Guest iovec layout: { u32 buf_ptr; u32 buf_len; }, 8 bytes per entry.
    u32 ptr = wasm_i32_load(wasi_data->instance_memory, iov + i * 8);
    u32 len = wasm_i32_load(wasi_data->instance_memory, iov + i * 8 + 4);

    // Traps (does not return an errno) when the buffer is out of bounds.
    WASI_MEMCHECK_SIZE(wasi_data->instance_memory, ptr, len);

    size_t result = fwrite(WASI_MEMACCESS(wasi_data->instance_memory, ptr),
                           1 /* size */,
                           len /* n */,
                           fd == WASM_STDOUT ? stdout : stderr);

    // Guaranteed by fwrite
    assert(result <= len);

    WASI_RET_ERR_ON_FAIL(safe_add_u32(&num, num, (u32)result));

    // Short write: report what was written so far and fail.
    if (((u32)result) != len) {
      wasm_i32_store(wasi_data->instance_memory, pnum, num);
      return WASI_PERM_ERROR;
    }
  }

  wasm_i32_store(wasi_data->instance_memory, pnum, num);
  return WASI_SUCCESS;
}
+
// WASI fd_read: scatter-read into `iovcnt` guest iovecs from stdin only.
// The total byte count read is stored at guest address `pnum`; a short read
// ends the loop early (EOF and error are not distinguished).
u32 w2c_wasi__snapshot__preview1_fd_read(
  w2c_wasi__snapshot__preview1* wasi_data,
  u32 fd,
  u32 iov,
  u32 iovcnt,
  u32 pnum)
{
  if (fd != WASM_STDIN) {
    return WASI_BADF_ERROR;
  }

  u32 num = 0;
  for (u32 i = 0; i < iovcnt; i++) {
    // Guest iovec layout: { u32 buf_ptr; u32 buf_len; }, 8 bytes per entry.
    u32 ptr = wasm_i32_load(wasi_data->instance_memory, iov + i * 8);
    u32 len = wasm_i32_load(wasi_data->instance_memory, iov + i * 8 + 4);

    WASI_MEMCHECK_SIZE(wasi_data->instance_memory, ptr, len);
    size_t result = fread(WASI_MEMACCESS(wasi_data->instance_memory, ptr),
                          1 /* size */,
                          len /* n */,
                          stdin);

    // Guaranteed by fread
    assert(result <= len);

    WASI_RET_ERR_ON_FAIL(safe_add_u32(&num, num, (u32)result));

    if (((u32)result) != len) {
      break; // nothing more to read
    }
  }
  wasm_i32_store(wasi_data->instance_memory, pnum, num);
  return WASI_SUCCESS;
}
+
+/////////////////////////////////////////////////////////////
+// App environment operations
+/////////////////////////////////////////////////////////////
+
// Hard cap on the number of argv/env entries exposed to the guest.
#define ARGV_AND_ENV_LIMIT 1000

// Helper for WASI {args,environ}_sizes_get: store the (possibly truncated)
// string count at guest address p_str_count and the total byte size of all
// strings including NUL terminators at p_str_buff_size. `name` is used only
// in the truncation warning message.
static u32 strings_sizes_get(wasm_rt_memory_t* instance_memory,
                             const char* name,
                             u32 p_str_count,
                             u32 p_str_buff_size,
                             u32 string_count,
                             const char** strings)
{
  u32 chosen_count = string_count;
  if (chosen_count > ARGV_AND_ENV_LIMIT) {
    chosen_count = ARGV_AND_ENV_LIMIT;
    printf("Truncated %s args to %d\n", name, ARGV_AND_ENV_LIMIT);
  }

  u32 curr_buf_size = 0;
  for (u32 i = 0; i < chosen_count; i++) {
    size_t original_len = strlen(strings[i]);
    // len has to be at most u32 - 1
    WASI_RET_ERR_ON_FAIL(original_len < (size_t)UINT32_MAX);

    u32 len = (u32)original_len;
    u32 len_plus_nullchar = len + 1;

    // Guard the running total against u32 overflow; failure returns EINVAL
    // without storing either output.
    WASI_RET_ERR_ON_FAIL(
      safe_add_u32(&curr_buf_size, curr_buf_size, len_plus_nullchar));
  }

  wasm_i32_store(instance_memory, p_str_count, chosen_count);
  wasm_i32_store(instance_memory, p_str_buff_size, curr_buf_size);
  return WASI_SUCCESS;
}
+
// Helper for WASI {args,environ}_get: write the guest pointer array at
// p_str_arr (4 bytes per entry) and the NUL-terminated string bytes at
// p_str_buf. Buffers must have been sized via strings_sizes_get; the same
// truncation limit is applied so the two passes agree.
static u32 strings_get(wasm_rt_memory_t* instance_memory,
                       const char* name,
                       u32 p_str_arr,
                       u32 p_str_buf,
                       u32 string_count,
                       const char** strings)
{
  u32 chosen_count = string_count;
  if (chosen_count > ARGV_AND_ENV_LIMIT) {
    chosen_count = ARGV_AND_ENV_LIMIT;
    // Warning is already printed in get_size
  }

  u32 curr_buf_loc = 0;

  for (u32 i = 0; i < chosen_count; i++) {
    // Implement: p_str_arr[i] = p_str_buf[curr_buf_loc]
    u32 target_argv_i_ref;
    WASI_RET_ERR_ON_FAIL(safe_add_u32(&target_argv_i_ref, p_str_arr, i * 4));

    u32 target_buf_curr_ref;
    WASI_RET_ERR_ON_FAIL(
      safe_add_u32(&target_buf_curr_ref, p_str_buf, curr_buf_loc));

    wasm_i32_store(instance_memory, target_argv_i_ref, target_buf_curr_ref);

    // Implement: strcpy(p_str_buf[curr_buf_loc], strings[i]);
    size_t original_len = strlen(strings[i]);
    // len has to be at most u32 - 1
    WASI_RET_ERR_ON_FAIL(original_len < (size_t)UINT32_MAX);

    u32 len = (u32)original_len;
    u32 len_plus_nullchar = len + 1;

    // Bounds-checked copy into sandbox memory (traps on OOB).
    WASI_CHECK_COPY(
      instance_memory, target_buf_curr_ref, len_plus_nullchar, strings[i]);
    // Implement: curr_buf_loc += strlen(p_str_buf[curr_buf_loc])
    WASI_RET_ERR_ON_FAIL(
      safe_add_u32(&curr_buf_loc, curr_buf_loc, len_plus_nullchar));
  }
  return WASI_SUCCESS;
}
+
+u32 w2c_wasi__snapshot__preview1_args_sizes_get(
+ w2c_wasi__snapshot__preview1* wasi_data,
+ u32 p_argc,
+ u32 p_argv_buf_size)
+{
+ return strings_sizes_get(wasi_data->instance_memory,
+ "main",
+ p_argc,
+ p_argv_buf_size,
+ wasi_data->main_argc,
+ wasi_data->main_argv);
+}
+
+u32 w2c_wasi__snapshot__preview1_args_get(
+ w2c_wasi__snapshot__preview1* wasi_data,
+ u32 p_argv,
+ u32 p_argv_buf)
+{
+ return strings_get(wasi_data->instance_memory,
+ "main",
+ p_argv,
+ p_argv_buf,
+ wasi_data->main_argc,
+ wasi_data->main_argv);
+}
+
+u32 w2c_wasi__snapshot__preview1_environ_sizes_get(
+ w2c_wasi__snapshot__preview1* wasi_data,
+ u32 p_env_count,
+ u32 p_env_buf_size)
+{
+ return strings_sizes_get(wasi_data->instance_memory,
+ "env",
+ p_env_count,
+ p_env_buf_size,
+ wasi_data->env_count,
+ wasi_data->env);
+}
+
+u32 w2c_wasi__snapshot__preview1_environ_get(
+ w2c_wasi__snapshot__preview1* wasi_data,
+ u32 p_env,
+ u32 p_env_buf)
+{
+ return strings_get(wasi_data->instance_memory,
+ "env",
+ p_env,
+ p_env_buf,
+ wasi_data->env_count,
+ wasi_data->env);
+}
+
+/////////////////////////////////////////////////////////////
+// Proc exit operation
+/////////////////////////////////////////////////////////////
+
+// WASI proc_exit: terminate the sandboxed program with exit code `x`.
+//
+// When WASM2C_WASI_TRAP_ON_EXIT is defined, the host traps instead of
+// exiting, so a guest calling exit() cannot tear down the embedding host
+// process. Otherwise the whole process exits with the guest's code.
+void w2c_wasi__snapshot__preview1_proc_exit(
+  w2c_wasi__snapshot__preview1* wasi_data,
+  u32 x)
+{
+  (void)wasi_data; // unused in both configurations
+#ifdef WASM2C_WASI_TRAP_ON_EXIT
+  (void)x; // exit code is discarded when trapping instead of exiting
+  TRAP(WASI);
+#else
+  exit(x);
+#endif
+}
+
+/////////////////////////////////////////////////////////////
+////////////// Unsupported WASI APIs
+/////////////////////////////////////////////////////////////
+
+// Defines a WASI import stub that ignores its arguments and reports
+// WASI_NOSYS_ERROR (function not supported). The parameter list keeps the
+// exact signature wasm2c-generated code expects, so the table below stays a
+// drop-in replacement for the real implementations.
+#define STUB_IMPORT_IMPL(ret, name, params) \
+  ret name params { return WASI_NOSYS_ERROR; }
+
+// clang-format off
+
+// File-descriptor operations not supported by this minimal WASI runtime.
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_advise,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u64 b, u64 c, u32 d));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_allocate,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u64 b, u64 c));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_close,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 fd));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_datasync,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_fdstat_get,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_fdstat_set_flags,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_fdstat_set_rights,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u64 b, u64 c));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_filestat_get,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_filestat_set_size,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u64 b));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_filestat_set_times,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u64 b, u64 c, u32 d));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_pread,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c, u64 d, u32 e));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_prestat_dir_name,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_pwrite,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c, u64 d, u32 e));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_readdir,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c, u64 d, u32 e));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_renumber,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_seek,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 fd, u64 offset, u32 whence, u32 new_offset));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_sync,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_fd_tell,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b));
+// Filesystem path operations (no filesystem access in the sandbox).
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_path_create_directory,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_path_filestat_get,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c, u32 d, u32 e));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_path_filestat_set_times,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c, u32 d, u64 e, u64 f, u32 g));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_path_link,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c, u32 d, u32 e, u32 f, u32 g));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_path_open,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c, u32 d, u32 e, u64 f, u64 g, u32 h, u32 i));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_path_readlink,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c, u32 d, u32 e, u32 f));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_path_remove_directory,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_path_rename,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c, u32 d, u32 e, u32 f));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_path_symlink,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c, u32 d, u32 e));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_path_unlink_file,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c));
+// Event, process, randomness, scheduling, and socket operations.
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_poll_oneoff,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c, u32 d));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_proc_raise,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_random_get,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_sched_yield,
+  (w2c_wasi__snapshot__preview1* wasi_data));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_sock_accept,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_sock_recv,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c, u32 d, u32 e, u32 f));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_sock_send,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b, u32 c, u32 d, u32 e));
+STUB_IMPORT_IMPL(u32, w2c_wasi__snapshot__preview1_sock_shutdown,
+  (w2c_wasi__snapshot__preview1* wasi_data, u32 a, u32 b));
+
+// clang-format on
+
+/////////////////////////////////////////////////////////////
+////////// Misc
+/////////////////////////////////////////////////////////////
+
+// One-time, process-wide minwasi setup. Currently this only initializes the
+// host clock backend; returns false on failure.
+bool minwasi_init()
+{
+  const bool clock_ok = os_clock_init();
+  return clock_ok;
+}
+
+// Per-sandbox-instance minwasi setup: initializes the clock state embedded
+// in this instance's WASI context. Returns false on failure.
+bool minwasi_init_instance(w2c_wasi__snapshot__preview1* wasi_data)
+{
+  return os_clock_init_instance(&wasi_data->clock_data);
+}
+
+// Releases the per-instance clock state set up by minwasi_init_instance.
+void minwasi_cleanup_instance(w2c_wasi__snapshot__preview1* wasi_data)
+{
+  os_clock_cleanup_instance(&wasi_data->clock_data);
+}