author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 18:31:44 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 18:31:44 +0000
commit     c23a457e72abe608715ac76f076f47dc42af07a5 (patch)
tree       2772049aaf84b5c9d0ed12ec8d86812f7a7904b6 /library/std/src/sys/xous/thread.rs
parent     Releasing progress-linux version 1.73.0+dfsg1-1~progress7.99u1. (diff)
download   rustc-c23a457e72abe608715ac76f076f47dc42af07a5.tar.xz
           rustc-c23a457e72abe608715ac76f076f47dc42af07a5.zip
Merging upstream version 1.74.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/std/src/sys/xous/thread.rs')
-rw-r--r--  library/std/src/sys/xous/thread.rs  144
1 file changed, 144 insertions(+), 0 deletions(-)
diff --git a/library/std/src/sys/xous/thread.rs b/library/std/src/sys/xous/thread.rs
new file mode 100644
index 000000000..78c68de7b
--- /dev/null
+++ b/library/std/src/sys/xous/thread.rs
@@ -0,0 +1,144 @@
+use crate::ffi::CStr;
+use crate::io;
+use crate::num::NonZeroUsize;
+use crate::os::xous::ffi::{
+    blocking_scalar, create_thread, do_yield, join_thread, map_memory, update_memory_flags,
+    MemoryFlags, Syscall, ThreadId,
+};
+use crate::os::xous::services::{ticktimer_server, TicktimerScalar};
+use crate::time::Duration;
+use core::arch::asm;
+
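+// A thread handle is just the Xous thread ID; `join` waits on it via `join_thread`.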
+pub struct Thread {
+    tid: ThreadId,
+}
+
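+// Stacks default to 128 KiB (DEFAULT_MIN_STACK_SIZE), are never smaller than one
+// 4 KiB page, and are bracketed by two inaccessible guard pages when mapped below.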
+pub const DEFAULT_MIN_STACK_SIZE: usize = 131072;
+const MIN_STACK_SIZE: usize = 4096;
+pub const GUARD_PAGE_SIZE: usize = 4096;
+
+impl Thread {
+    // unsafe: see thread::Builder::spawn_unchecked for safety requirements
+    pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+        let p = Box::into_raw(Box::new(p));
+        let mut stack_size = crate::cmp::max(stack, MIN_STACK_SIZE);
+
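+        // Round the stack size up to the next 4 KiB page boundary, since memory is
+        // mapped in whole pages.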
+        if (stack_size & 4095) != 0 {
+            stack_size = (stack_size + 4095) & !4095;
+        }
+
+        // Allocate the whole thing, then divide it up after the fact. This ensures that
+        // even if there's a context switch during this function, the whole stack plus
+        // guard pages will remain contiguous.
+        let stack_plus_guard_pages: &mut [u8] = unsafe {
+            map_memory(
+                None,
+                None,
+                GUARD_PAGE_SIZE + stack_size + GUARD_PAGE_SIZE,
+                MemoryFlags::R | MemoryFlags::W | MemoryFlags::X,
+            )
+        }
+        .map_err(|code| io::Error::from_raw_os_error(code as i32))?;
+
+        // No access to this page. Note: Write-only pages are illegal, and will
+        // cause an access violation.
+        unsafe {
+            update_memory_flags(&mut stack_plus_guard_pages[0..GUARD_PAGE_SIZE], MemoryFlags::W)
+                .map_err(|code| io::Error::from_raw_os_error(code as i32))?
+        };
+
+        // No access to this page. Note: Write-only pages are illegal, and will
+        // cause an access violation.
+        unsafe {
+            update_memory_flags(
+                &mut stack_plus_guard_pages[(GUARD_PAGE_SIZE + stack_size)..],
+                MemoryFlags::W,
+            )
+            .map_err(|code| io::Error::from_raw_os_error(code as i32))?
+        };
+
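+        // Hand the kernel the entry point, the usable stack region between the two
+        // guard pages, and the three values that `thread_start` receives as arguments:
+        // the boxed closure, the base of the whole mapping, and the stack size.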
+        let guard_page_pre = stack_plus_guard_pages.as_ptr() as usize;
+        let tid = create_thread(
+            thread_start as *mut usize,
+            &mut stack_plus_guard_pages[GUARD_PAGE_SIZE..(stack_size + GUARD_PAGE_SIZE)],
+            p as usize,
+            guard_page_pre,
+            stack_size,
+            0,
+        )
+        .map_err(|code| io::Error::from_raw_os_error(code as i32))?;
+
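+        // Entry point run by the kernel on the new thread; its arguments are the
+        // extra values passed to `create_thread` above.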
+        extern "C" fn thread_start(main: *mut usize, guard_page_pre: usize, stack_size: usize) {
+            unsafe {
+                // Finally, let's run some code.
+                Box::from_raw(main as *mut Box<dyn FnOnce()>)();
+            }
+
+            // Destroy TLS, which will free the TLS page and call the destructor for
+            // any thread local storage.
+            unsafe {
+                crate::sys::thread_local_key::destroy_tls();
+            }
+
+            // Deallocate the stack memory, along with the guard pages. Afterwards,
+            // exit the thread by returning to the magic address 0xff80_3000usize,
+            // which tells the kernel to deallocate this thread.
+            let mapped_memory_base = guard_page_pre;
+            let mapped_memory_length = GUARD_PAGE_SIZE + stack_size + GUARD_PAGE_SIZE;
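+            // a0 carries the syscall number and a1/a2 its arguments; `ra` is preloaded
+            // with the exit address so that the `ret` following the `ecall` jumps there
+            // without touching the stack that was just unmapped.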
+            unsafe {
+                asm!(
+                    "ecall",
+                    "ret",
+                    in("a0") Syscall::UnmapMemory as usize,
+                    in("a1") mapped_memory_base,
+                    in("a2") mapped_memory_length,
+                    in("ra") 0xff80_3000usize,
+                    options(nomem, nostack, noreturn)
+                );
+            }
+        }
+
+        Ok(Thread { tid })
+    }
+
+    pub fn yield_now() {
+        do_yield();
+    }
+
+    pub fn set_name(_name: &CStr) {
+        // Thread names are not supported on this platform; the name is ignored.
+    }
+
+    pub fn sleep(dur: Duration) {
+        // Because the sleep server works in units of `usize` milliseconds, split the
+        // request up into chunks of at most that size. This means sleeping a thread
+        // for more than 49 days on a 32-bit system may run into issues.
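+        // (usize::MAX milliseconds on a 32-bit target is about 2^32 ms, roughly 49.7 days.)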
+        let mut millis = dur.as_millis();
+        while millis > 0 {
+            let sleep_duration =
+                if millis > (usize::MAX as _) { usize::MAX } else { millis as usize };
+            blocking_scalar(ticktimer_server(), TicktimerScalar::SleepMs(sleep_duration).into())
+                .expect("failed to send message to ticktimer server");
+            millis -= sleep_duration as u128;
+        }
+    }
+
+    pub fn join(self) {
+        join_thread(self.tid).unwrap();
+    }
+}
+
+pub fn available_parallelism() -> io::Result<NonZeroUsize> {
+    // We're unicore right now.
+    Ok(unsafe { NonZeroUsize::new_unchecked(1) })
+}
+
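+// Guard pages are mapped manually in `Thread::new()` above, so this platform has
+// no separate stack-guard state to initialize or report.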
+pub mod guard {
+    pub type Guard = !;
+    pub unsafe fn current() -> Option<Guard> {
+        None
+    }
+    pub unsafe fn init() -> Option<Guard> {
+        None
+    }
+}