summaryrefslogtreecommitdiffstats
path: root/library/std/src/sys/wasm
diff options
context:
space:
mode:
Diffstat (limited to 'library/std/src/sys/wasm')
-rw-r--r--library/std/src/sys/wasm/alloc.rs166
-rw-r--r--library/std/src/sys/wasm/atomics/futex.rs34
-rw-r--r--library/std/src/sys/wasm/atomics/thread.rs55
-rw-r--r--library/std/src/sys/wasm/env.rs9
-rw-r--r--library/std/src/sys/wasm/mod.rs77
5 files changed, 341 insertions, 0 deletions
diff --git a/library/std/src/sys/wasm/alloc.rs b/library/std/src/sys/wasm/alloc.rs
new file mode 100644
index 000000000..6dceb1689
--- /dev/null
+++ b/library/std/src/sys/wasm/alloc.rs
@@ -0,0 +1,166 @@
+//! This is an implementation of a global allocator on wasm targets when
+//! emscripten is not in use. In that situation there's no actual runtime for us
+//! to lean on for allocation, so instead we provide our own!
+//!
+//! The wasm instruction set has two instructions for getting the current
+//! amount of memory and growing the amount of memory. These instructions are the
+//! foundation on which we're able to build an allocator, so we do so! Note that
+//! the instructions are also pretty "global" and this is the "global" allocator
+//! after all!
+//!
+//! The current allocator here is the `dlmalloc` crate which we've got included
+//! in the rust-lang/rust repository as a submodule. The crate is a port of
+//! dlmalloc.c from C to Rust and is basically just so we can have "pure Rust"
+//! for now which is currently technically required (can't link with C yet).
+//!
+//! The crate itself provides a global allocator which on wasm has no
+//! synchronization as there are no threads!
+
+use crate::alloc::{GlobalAlloc, Layout, System};
+
+static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::Dlmalloc::new();
+
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
+ // Calling malloc() is safe because preconditions on this function match the trait method preconditions.
+ let _lock = lock::lock();
+ unsafe { DLMALLOC.malloc(layout.size(), layout.align()) }
+ }
+
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
+ // Calling calloc() is safe because preconditions on this function match the trait method preconditions.
+ let _lock = lock::lock();
+ unsafe { DLMALLOC.calloc(layout.size(), layout.align()) }
+ }
+
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
+ // Calling free() is safe because preconditions on this function match the trait method preconditions.
+ let _lock = lock::lock();
+ unsafe { DLMALLOC.free(ptr, layout.size(), layout.align()) }
+ }
+
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
+ // Calling realloc() is safe because preconditions on this function match the trait method preconditions.
+ let _lock = lock::lock();
+ unsafe { DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size) }
+ }
+}
+
+#[cfg(target_feature = "atomics")]
+mod lock {
+ use crate::sync::atomic::{AtomicI32, Ordering::SeqCst};
+
+ static LOCKED: AtomicI32 = AtomicI32::new(0);
+
+ pub struct DropLock;
+
+ pub fn lock() -> DropLock {
+ loop {
+ if LOCKED.swap(1, SeqCst) == 0 {
+ return DropLock;
+ }
+ // Ok so here's where things get a little depressing. At this point
+ // in time we need to synchronously acquire a lock, but we're
+ // contending with some other thread. Typically we'd execute some
+ // form of `i32.atomic.wait` like so:
+ //
+ // unsafe {
+ // let r = core::arch::wasm32::i32_atomic_wait(
+ // LOCKED.as_mut_ptr(),
+ // 1, // expected value
+ // -1, // timeout
+ // );
+ // debug_assert!(r == 0 || r == 1);
+ // }
+ //
+ // Unfortunately though in doing so we would cause issues for the
+ // main thread. The main thread in a web browser *cannot ever
+ // block*, no exceptions. This means that the main thread can't
+ // actually execute the `i32.atomic.wait` instruction.
+ //
+ // As a result if we want to work within the context of browsers we
+ // need to figure out some sort of allocation scheme for the main
+ // thread where when there's contention on the global malloc lock we
+ // do... something.
+ //
+ // Possible ideas include:
+ //
+ // 1. Attempt to acquire the global lock. If it fails, fall back to
+ // memory allocation via `memory.grow`. Later just ... somehow
+ // ... inject this raw page back into the main allocator as it
+ // gets sliced up over time. This strategy has the downside of
+ // forcing allocation of a page to happen whenever the main
+ // thread contents with other threads, which is unfortunate.
+ //
+ // 2. Maintain a form of "two level" allocator scheme where the main
+ // thread has its own allocator. Somehow this allocator would
+ // also be balanced with a global allocator, not only to have
+ // allocations cross between threads but also to ensure that the
+ // two allocators stay "balanced" in terms of free'd memory and
+ // such. This, however, seems significantly complicated.
+ //
+ // Out of a lack of other ideas, the current strategy implemented
+ // here is to simply spin. Typical spin loop algorithms have some
+ // form of "hint" here to the CPU that it's what we're doing to
+ // ensure that the CPU doesn't get too hot, but wasm doesn't have
+ // such an instruction.
+ //
+ // To be clear, spinning here is not a great solution.
+ // Another thread with the lock may take quite a long time to wake
+ // up. For example it could be in `memory.grow` or it could be
+ // evicted from the CPU for a timeslice like 10ms. For these periods
+ // of time our thread will "helpfully" sit here and eat CPU time
+ // until it itself is evicted or the lock holder finishes. This
+ // means we're just burning and wasting CPU time to no one's
+ // benefit.
+ //
+ // Spinning does have the nice properties, though, of being
+ // semantically correct, being fair to all threads for memory
+ // allocation, and being simple enough to implement.
+ //
+ // This will surely (hopefully) be replaced in the future with a
+ // real memory allocator that can handle the restriction of the main
+ // thread.
+ //
+ //
+ // FIXME: We can also possibly add an optimization here to detect
+ // when a thread is the main thread or not and block on all
+ // non-main-thread threads. Currently, however, we have no way
+ // of knowing which wasm thread is on the browser main thread, but
+ // if we could figure out we could at least somewhat mitigate the
+ // cost of this spinning.
+ }
+ }
+
+ impl Drop for DropLock {
+ fn drop(&mut self) {
+ let r = LOCKED.swap(0, SeqCst);
+ debug_assert_eq!(r, 1);
+
+ // Note that due to the above logic we don't actually need to wake
+ // anyone up, but if we did it'd likely look something like this:
+ //
+ // unsafe {
+ // core::arch::wasm32::atomic_notify(
+ // LOCKED.as_mut_ptr(),
+ // 1, // only one thread
+ // );
+ // }
+ }
+ }
+}
+
+#[cfg(not(target_feature = "atomics"))]
+mod lock {
+ #[inline]
+ pub fn lock() {} // no atomics, no threads, that's easy!
+}
diff --git a/library/std/src/sys/wasm/atomics/futex.rs b/library/std/src/sys/wasm/atomics/futex.rs
new file mode 100644
index 000000000..f4fbe9f48
--- /dev/null
+++ b/library/std/src/sys/wasm/atomics/futex.rs
@@ -0,0 +1,34 @@
+use crate::arch::wasm32;
+use crate::sync::atomic::AtomicU32;
+use crate::time::Duration;
+
+/// Wait for a futex_wake operation to wake us.
+///
+/// Returns directly if the futex doesn't hold the expected value.
+///
+/// Returns false on timeout, and true in all other cases.
+pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
+ let timeout = timeout.and_then(|t| t.as_nanos().try_into().ok()).unwrap_or(-1);
+ unsafe {
+ wasm32::memory_atomic_wait32(
+ futex as *const AtomicU32 as *mut i32,
+ expected as i32,
+ timeout,
+ ) < 2
+ }
+}
+
+/// Wake up one thread that's blocked on futex_wait on this futex.
+///
+/// Returns true if this actually woke up such a thread,
+/// or false if no thread was waiting on this futex.
+pub fn futex_wake(futex: &AtomicU32) -> bool {
+ unsafe { wasm32::memory_atomic_notify(futex as *const AtomicU32 as *mut i32, 1) > 0 }
+}
+
+/// Wake up all threads that are waiting on futex_wait on this futex.
+pub fn futex_wake_all(futex: &AtomicU32) {
+ unsafe {
+ wasm32::memory_atomic_notify(futex as *const AtomicU32 as *mut i32, i32::MAX as u32);
+ }
+}
diff --git a/library/std/src/sys/wasm/atomics/thread.rs b/library/std/src/sys/wasm/atomics/thread.rs
new file mode 100644
index 000000000..714b70492
--- /dev/null
+++ b/library/std/src/sys/wasm/atomics/thread.rs
@@ -0,0 +1,55 @@
+use crate::ffi::CStr;
+use crate::io;
+use crate::num::NonZeroUsize;
+use crate::sys::unsupported;
+use crate::time::Duration;
+
+pub struct Thread(!);
+
+pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;
+
+impl Thread {
+ // unsafe: see thread::Builder::spawn_unchecked for safety requirements
+ pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+ unsupported()
+ }
+
+ pub fn yield_now() {}
+
+ pub fn set_name(_name: &CStr) {}
+
+ pub fn sleep(dur: Duration) {
+ use crate::arch::wasm32;
+ use crate::cmp;
+
+ // Use an atomic wait to block the current thread artificially with a
+ // timeout listed. Note that we should never be notified (return value
+ // of 0) or our comparison should never fail (return value of 1) so we
+ // should always only resume execution through a timeout (return value
+ // 2).
+ let mut nanos = dur.as_nanos();
+ while nanos > 0 {
+ let amt = cmp::min(i64::MAX as u128, nanos);
+ let mut x = 0;
+ let val = unsafe { wasm32::memory_atomic_wait32(&mut x, 0, amt as i64) };
+ debug_assert_eq!(val, 2);
+ nanos -= amt;
+ }
+ }
+
+ pub fn join(self) {}
+}
+
+pub fn available_parallelism() -> io::Result<NonZeroUsize> {
+ unsupported()
+}
+
+pub mod guard {
+ pub type Guard = !;
+ pub unsafe fn current() -> Option<Guard> {
+ None
+ }
+ pub unsafe fn init() -> Option<Guard> {
+ None
+ }
+}
diff --git a/library/std/src/sys/wasm/env.rs b/library/std/src/sys/wasm/env.rs
new file mode 100644
index 000000000..730e356d7
--- /dev/null
+++ b/library/std/src/sys/wasm/env.rs
@@ -0,0 +1,9 @@
+pub mod os {
+ pub const FAMILY: &str = "";
+ pub const OS: &str = "";
+ pub const DLL_PREFIX: &str = "";
+ pub const DLL_SUFFIX: &str = ".wasm";
+ pub const DLL_EXTENSION: &str = "wasm";
+ pub const EXE_SUFFIX: &str = ".wasm";
+ pub const EXE_EXTENSION: &str = "wasm";
+}
diff --git a/library/std/src/sys/wasm/mod.rs b/library/std/src/sys/wasm/mod.rs
new file mode 100644
index 000000000..4159efe2a
--- /dev/null
+++ b/library/std/src/sys/wasm/mod.rs
@@ -0,0 +1,77 @@
+//! System bindings for the wasm/web platform
+//!
+//! This module contains the facade (aka platform-specific) implementations of
+//! OS level functionality for wasm. Note that this wasm is *not* the emscripten
+//! wasm, so we have no runtime here.
+//!
+//! This is all super highly experimental and not actually intended for
+//! wide/production use yet, it's still all in the experimental category. This
+//! will likely change over time.
+//!
+//! Currently all functions here are basically stubs that immediately return
+//! errors. The hope is that with a portability lint we can eventually just
+//! remove all this and instead omit parts of the standard library if we're
+//! compiling for wasm. That way it's a compile time error for something that's
+//! guaranteed to be a runtime error!
+
+#![deny(unsafe_op_in_unsafe_fn)]
+
+pub mod alloc;
+#[path = "../unsupported/args.rs"]
+pub mod args;
+#[path = "../unix/cmath.rs"]
+pub mod cmath;
+pub mod env;
+#[path = "../unsupported/fs.rs"]
+pub mod fs;
+#[path = "../unsupported/io.rs"]
+pub mod io;
+#[path = "../unsupported/net.rs"]
+pub mod net;
+#[path = "../unsupported/os.rs"]
+pub mod os;
+#[path = "../unix/os_str.rs"]
+pub mod os_str;
+#[path = "../unix/path.rs"]
+pub mod path;
+#[path = "../unsupported/pipe.rs"]
+pub mod pipe;
+#[path = "../unsupported/process.rs"]
+pub mod process;
+#[path = "../unsupported/stdio.rs"]
+pub mod stdio;
+#[path = "../unsupported/thread_local_dtor.rs"]
+pub mod thread_local_dtor;
+#[path = "../unsupported/thread_local_key.rs"]
+pub mod thread_local_key;
+#[path = "../unsupported/time.rs"]
+pub mod time;
+
+cfg_if::cfg_if! {
+ if #[cfg(target_feature = "atomics")] {
+ #[path = "../unix/locks"]
+ pub mod locks {
+ #![allow(unsafe_op_in_unsafe_fn)]
+ mod futex_condvar;
+ mod futex_mutex;
+ mod futex_rwlock;
+ pub(crate) use futex_condvar::{Condvar, MovableCondvar};
+ pub(crate) use futex_mutex::{Mutex, MovableMutex};
+ pub(crate) use futex_rwlock::{RwLock, MovableRwLock};
+ }
+ #[path = "atomics/futex.rs"]
+ pub mod futex;
+ #[path = "atomics/thread.rs"]
+ pub mod thread;
+ } else {
+ #[path = "../unsupported/locks/mod.rs"]
+ pub mod locks;
+ #[path = "../unsupported/thread.rs"]
+ pub mod thread;
+ }
+}
+
+#[path = "../unsupported/common.rs"]
+#[deny(unsafe_op_in_unsafe_fn)]
+mod common;
+pub use common::*;