diff options
Diffstat (limited to 'library/std/src/sys/sgx/abi')
-rw-r--r-- | library/std/src/sys/sgx/abi/entry.S | 372 | ||||
-rw-r--r-- | library/std/src/sys/sgx/abi/mem.rs | 93 | ||||
-rw-r--r-- | library/std/src/sys/sgx/abi/mod.rs | 108 | ||||
-rw-r--r-- | library/std/src/sys/sgx/abi/panic.rs | 42 | ||||
-rw-r--r-- | library/std/src/sys/sgx/abi/reloc.rs | 32 | ||||
-rw-r--r-- | library/std/src/sys/sgx/abi/thread.rs | 13 | ||||
-rw-r--r-- | library/std/src/sys/sgx/abi/tls/mod.rs | 132 | ||||
-rw-r--r-- | library/std/src/sys/sgx/abi/tls/sync_bitset.rs | 85 | ||||
-rw-r--r-- | library/std/src/sys/sgx/abi/tls/sync_bitset/tests.rs | 25 | ||||
-rw-r--r-- | library/std/src/sys/sgx/abi/usercalls/alloc.rs | 732 | ||||
-rw-r--r-- | library/std/src/sys/sgx/abi/usercalls/mod.rs | 323 | ||||
-rw-r--r-- | library/std/src/sys/sgx/abi/usercalls/raw.rs | 251 | ||||
-rw-r--r-- | library/std/src/sys/sgx/abi/usercalls/tests.rs | 30 |
13 files changed, 2238 insertions, 0 deletions
diff --git a/library/std/src/sys/sgx/abi/entry.S b/library/std/src/sys/sgx/abi/entry.S new file mode 100644 index 000000000..f61bcf06f --- /dev/null +++ b/library/std/src/sys/sgx/abi/entry.S @@ -0,0 +1,372 @@ +/* This symbol is used at runtime to figure out the virtual address that the */ +/* enclave is loaded at. */ +.section absolute +.global IMAGE_BASE +IMAGE_BASE: + +.section ".note.x86_64-fortanix-unknown-sgx", "", @note + .align 4 + .long 1f - 0f /* name length (not including padding) */ + .long 3f - 2f /* desc length (not including padding) */ + .long 1 /* type = NT_VERSION */ +0: .asciz "toolchain-version" /* name */ +1: .align 4 +2: .long 1 /* desc - toolchain version number, 32-bit LE */ +3: .align 4 + +.section .rodata +/* The XSAVE area needs to be a large chunk of readable memory, but since we are */ +/* going to restore everything to its initial state (XSTATE_BV=0), only certain */ +/* parts need to have a defined value. In particular: */ +/* */ +/* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */ +/* RFBM[2] is set, regardless of the value of XSTATE_BV */ +/* * XSAVE header */ +.align 64 +.Lxsave_clear: +.org .+24 +.Lxsave_mxcsr: + .short 0x1f80 + +/* We can store a bunch of data in the gap between MXCSR and the XSAVE header */ + +/* The following symbols point at read-only data that will be filled in by the */ +/* post-linker. */ + +/* When using this macro, don't forget to adjust the linker version script! 
*/ +.macro globvar name:req size:req + .global \name + .protected \name + .align \size + .size \name , \size + \name : + .org .+\size +.endm + /* The base address (relative to enclave start) of the heap area */ + globvar HEAP_BASE 8 + /* The heap size in bytes */ + globvar HEAP_SIZE 8 + /* Value of the RELA entry in the dynamic table */ + globvar RELA 8 + /* Value of the RELACOUNT entry in the dynamic table */ + globvar RELACOUNT 8 + /* The enclave size in bytes */ + globvar ENCLAVE_SIZE 8 + /* The base address (relative to enclave start) of the enclave configuration area */ + globvar CFGDATA_BASE 8 + /* Non-zero if debugging is enabled, zero otherwise */ + globvar DEBUG 1 + /* The base address (relative to enclave start) of the enclave text section */ + globvar TEXT_BASE 8 + /* The size in bytes of enclacve text section */ + globvar TEXT_SIZE 8 + /* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */ + globvar EH_FRM_HDR_OFFSET 8 + /* The size in bytes of enclave .eh_frame_hdr section */ + globvar EH_FRM_HDR_LEN 8 + /* The base address (relative to enclave start) of the enclave .eh_frame section */ + globvar EH_FRM_OFFSET 8 + /* The size in bytes of enclacve .eh_frame section */ + globvar EH_FRM_LEN 8 + +.org .Lxsave_clear+512 +.Lxsave_header: + .int 0, 0 /* XSTATE_BV */ + .int 0, 0 /* XCOMP_BV */ + .org .+48 /* reserved bits */ + +.data +.Laborted: + .byte 0 + +/* TCS local storage section */ +.equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */ +.equ tcsls_flags, 0x08 /* initialized by loader */ +.equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */ +.equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */ +/* 14 unused bits */ +.equ tcsls_user_fcw, 0x0a +.equ tcsls_user_mxcsr, 0x0c +.equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */ +.equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */ +.equ tcsls_debug_panic_buf_ptr, 0x20 /* 
initialized by loader to 0 */ +.equ tcsls_user_rsp, 0x28 +.equ tcsls_user_retip, 0x30 +.equ tcsls_user_rbp, 0x38 +.equ tcsls_user_r12, 0x40 +.equ tcsls_user_r13, 0x48 +.equ tcsls_user_r14, 0x50 +.equ tcsls_user_r15, 0x58 +.equ tcsls_tls_ptr, 0x60 +.equ tcsls_tcs_addr, 0x68 + +.macro load_tcsls_flag_secondary_bool reg:req comments:vararg + .ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */ + .abort + .endif + mov $(1<<tcsls_flag_secondary),%e\reg + and %gs:tcsls_flags,%\reg +.endm + +/* We place the ELF entry point in a separate section so it can be removed by + elf2sgxs */ +.section .text_no_sgx, "ax" +.Lelf_entry_error_msg: + .ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n" +.Lelf_entry_error_msg_end: + +.global elf_entry +.type elf_entry,function +elf_entry: +/* print error message */ + movq $2,%rdi /* write to stderr (fd 2) */ + lea .Lelf_entry_error_msg(%rip),%rsi + movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx +.Lelf_entry_call: + movq $1,%rax /* write() syscall */ + syscall + test %rax,%rax + jle .Lelf_exit /* exit on error */ + add %rax,%rsi + sub %rax,%rdx /* all chars written? */ + jnz .Lelf_entry_call + +.Lelf_exit: + movq $60,%rax /* exit() syscall */ + movq $1,%rdi /* exit code 1 */ + syscall + ud2 /* should not be reached */ +/* end elf_entry */ + +/* This code needs to be called *after* the enclave stack has been setup. */ +/* There are 3 places where this needs to happen, so this is put in a macro. 
*/ +.macro entry_sanitize_final +/* Sanitize rflags received from user */ +/* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */ +/* - AC flag: AEX on misaligned memory accesses leaks side channel info */ + pushfq + andq $~0x40400, (%rsp) + popfq +/* check for abort */ + bt $0,.Laborted(%rip) + jc .Lreentry_panic +.endm + +.text +.global sgx_entry +.type sgx_entry,function +sgx_entry: +/* save user registers */ + mov %rcx,%gs:tcsls_user_retip + mov %rsp,%gs:tcsls_user_rsp + mov %rbp,%gs:tcsls_user_rbp + mov %r12,%gs:tcsls_user_r12 + mov %r13,%gs:tcsls_user_r13 + mov %r14,%gs:tcsls_user_r14 + mov %r15,%gs:tcsls_user_r15 + mov %rbx,%gs:tcsls_tcs_addr + stmxcsr %gs:tcsls_user_mxcsr + fnstcw %gs:tcsls_user_fcw + +/* check for debug buffer pointer */ + testb $0xff,DEBUG(%rip) + jz .Lskip_debug_init + mov %r10,%gs:tcsls_debug_panic_buf_ptr +.Lskip_debug_init: +/* reset cpu state */ + mov %rdx, %r10 + mov $-1, %rax + mov $-1, %rdx + xrstor .Lxsave_clear(%rip) + mov %r10, %rdx + +/* check if returning from usercall */ + mov %gs:tcsls_last_rsp,%r11 + test %r11,%r11 + jnz .Lusercall_ret +/* setup stack */ + mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */ + /* here. This is fixed below under "adjust stack". 
*/ +/* check for thread init */ + bts $tcsls_flag_init_once,%gs:tcsls_flags + jc .Lskip_init +/* adjust stack */ + lea IMAGE_BASE(%rip),%rax + add %rax,%rsp + mov %rsp,%gs:tcsls_tos + entry_sanitize_final +/* call tcs_init */ +/* store caller-saved registers in callee-saved registers */ + mov %rdi,%rbx + mov %rsi,%r12 + mov %rdx,%r13 + mov %r8,%r14 + mov %r9,%r15 + load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */ + call tcs_init +/* reload caller-saved registers */ + mov %rbx,%rdi + mov %r12,%rsi + mov %r13,%rdx + mov %r14,%r8 + mov %r15,%r9 + jmp .Lafter_init +.Lskip_init: + entry_sanitize_final +.Lafter_init: +/* call into main entry point */ + load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */ + call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */ + mov %rax,%rsi /* RSI = return value */ + /* NOP: mov %rdx,%rdx */ /* RDX = return value */ + xor %rdi,%rdi /* RDI = normal exit */ +.Lexit: +/* clear general purpose register state */ + /* RAX overwritten by ENCLU */ + /* RBX set later */ + /* RCX overwritten by ENCLU */ + /* RDX contains return value */ + /* RSP set later */ + /* RBP set later */ + /* RDI contains exit mode */ + /* RSI contains return value */ + xor %r8,%r8 + xor %r9,%r9 + xor %r10,%r10 + xor %r11,%r11 + /* R12 ~ R15 set by sgx_exit */ +.Lsgx_exit: +/* clear extended register state */ + mov %rdx, %rcx /* save RDX */ + mov $-1, %rax + mov %rax, %rdx + xrstor .Lxsave_clear(%rip) + mov %rcx, %rdx /* restore RDX */ +/* clear flags */ + pushq $0 + popfq +/* restore user registers */ + mov %gs:tcsls_user_r12,%r12 + mov %gs:tcsls_user_r13,%r13 + mov %gs:tcsls_user_r14,%r14 + mov %gs:tcsls_user_r15,%r15 + mov %gs:tcsls_user_retip,%rbx + mov %gs:tcsls_user_rsp,%rsp + mov %gs:tcsls_user_rbp,%rbp + fldcw %gs:tcsls_user_fcw + ldmxcsr %gs:tcsls_user_mxcsr +/* exit enclave */ + mov $0x4,%eax /* EEXIT */ + enclu +/* end sgx_entry */ + +.Lreentry_panic: + orq $8,%rsp + jmp abort_reentry + +/* 
This *MUST* be called with 6 parameters, otherwise register information */ +/* might leak! */ +.global usercall +usercall: + test %rcx,%rcx /* check `abort` function argument */ + jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */ + jmp .Lusercall_save_state /* non-aborting usercall */ +.Lusercall_abort: +/* set aborted bit */ + movb $1,.Laborted(%rip) +/* save registers in DEBUG mode, so that debugger can reconstruct the stack */ + testb $0xff,DEBUG(%rip) + jz .Lusercall_noreturn +.Lusercall_save_state: +/* save callee-saved state */ + push %r15 + push %r14 + push %r13 + push %r12 + push %rbp + push %rbx + sub $8, %rsp + fstcw 4(%rsp) + stmxcsr (%rsp) + movq %rsp,%gs:tcsls_last_rsp +.Lusercall_noreturn: +/* clear general purpose register state */ + /* RAX overwritten by ENCLU */ + /* RBX set by sgx_exit */ + /* RCX overwritten by ENCLU */ + /* RDX contains parameter */ + /* RSP set by sgx_exit */ + /* RBP set by sgx_exit */ + /* RDI contains parameter */ + /* RSI contains parameter */ + /* R8 contains parameter */ + /* R9 contains parameter */ + xor %r10,%r10 + xor %r11,%r11 + /* R12 ~ R15 set by sgx_exit */ +/* extended registers/flags cleared by sgx_exit */ +/* exit */ + jmp .Lsgx_exit +.Lusercall_ret: + movq $0,%gs:tcsls_last_rsp +/* restore callee-saved state, cf. 
"save" above */ + mov %r11,%rsp + ldmxcsr (%rsp) + fldcw 4(%rsp) + add $8, %rsp + entry_sanitize_final + pop %rbx + pop %rbp + pop %r12 + pop %r13 + pop %r14 + pop %r15 +/* return */ + mov %rsi,%rax /* RAX = return value */ + /* NOP: mov %rdx,%rdx */ /* RDX = return value */ + pop %r11 + lfence + jmp *%r11 + +/* +The following functions need to be defined externally: +``` +// Called by entry code on re-entry after exit +extern "C" fn abort_reentry() -> !; + +// Called once when a TCS is first entered +extern "C" fn tcs_init(secondary: bool); + +// Standard TCS entrypoint +extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64); +``` +*/ + +.global get_tcs_addr +get_tcs_addr: + mov %gs:tcsls_tcs_addr,%rax + pop %r11 + lfence + jmp *%r11 + +.global get_tls_ptr +get_tls_ptr: + mov %gs:tcsls_tls_ptr,%rax + pop %r11 + lfence + jmp *%r11 + +.global set_tls_ptr +set_tls_ptr: + mov %rdi,%gs:tcsls_tls_ptr + pop %r11 + lfence + jmp *%r11 + +.global take_debug_panic_buf_ptr +take_debug_panic_buf_ptr: + xor %rax,%rax + xchg %gs:tcsls_debug_panic_buf_ptr,%rax + pop %r11 + lfence + jmp *%r11 diff --git a/library/std/src/sys/sgx/abi/mem.rs b/library/std/src/sys/sgx/abi/mem.rs new file mode 100644 index 000000000..18e6d5b3f --- /dev/null +++ b/library/std/src/sys/sgx/abi/mem.rs @@ -0,0 +1,93 @@ +use core::arch::asm; + +// Do not remove inline: will result in relocation failure +#[inline(always)] +pub(crate) unsafe fn rel_ptr<T>(offset: u64) -> *const T { + (image_base() + offset) as *const T +} + +// Do not remove inline: will result in relocation failure +#[inline(always)] +pub(crate) unsafe fn rel_ptr_mut<T>(offset: u64) -> *mut T { + (image_base() + offset) as *mut T +} + +extern "C" { + static ENCLAVE_SIZE: usize; + static HEAP_BASE: u64; + static HEAP_SIZE: usize; +} + +/// Returns the base memory address of the heap +pub(crate) fn heap_base() -> *const u8 { + unsafe { rel_ptr_mut(HEAP_BASE) } +} + +/// Returns the size of the heap 
+pub(crate) fn heap_size() -> usize { + unsafe { HEAP_SIZE } +} + +// Do not remove inline: will result in relocation failure +// For the same reason we use inline ASM here instead of an extern static to +// locate the base +/// Returns address at which current enclave is loaded. +#[inline(always)] +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn image_base() -> u64 { + let base: u64; + unsafe { + asm!( + "lea IMAGE_BASE(%rip), {}", + lateout(reg) base, + options(att_syntax, nostack, preserves_flags, nomem, pure), + ) + }; + base +} + +/// Returns `true` if the specified memory range is in the enclave. +/// +/// For safety, this function also checks whether the range given overflows, +/// returning `false` if so. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn is_enclave_range(p: *const u8, len: usize) -> bool { + let start = p as usize; + + // Subtract one from `len` when calculating `end` in case `p + len` is + // exactly at the end of addressable memory (`p + len` would overflow, but + // the range is still valid). + let end = if len == 0 { + start + } else if let Some(end) = start.checked_add(len - 1) { + end + } else { + return false; + }; + + let base = image_base() as usize; + start >= base && end <= base + (unsafe { ENCLAVE_SIZE } - 1) // unsafe ok: link-time constant +} + +/// Returns `true` if the specified memory range is in userspace. +/// +/// For safety, this function also checks whether the range given overflows, +/// returning `false` if so. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn is_user_range(p: *const u8, len: usize) -> bool { + let start = p as usize; + + // Subtract one from `len` when calculating `end` in case `p + len` is + // exactly at the end of addressable memory (`p + len` would overflow, but + // the range is still valid). 
+ let end = if len == 0 { + start + } else if let Some(end) = start.checked_add(len - 1) { + end + } else { + return false; + }; + + let base = image_base() as usize; + end < base || start > base + (unsafe { ENCLAVE_SIZE } - 1) // unsafe ok: link-time constant +} diff --git a/library/std/src/sys/sgx/abi/mod.rs b/library/std/src/sys/sgx/abi/mod.rs new file mode 100644 index 000000000..9508c3874 --- /dev/null +++ b/library/std/src/sys/sgx/abi/mod.rs @@ -0,0 +1,108 @@ +#![cfg_attr(test, allow(unused))] // RT initialization logic is not compiled for test + +use crate::io::Write; +use core::arch::global_asm; +use core::sync::atomic::{AtomicUsize, Ordering}; + +// runtime features +pub(super) mod panic; +mod reloc; + +// library features +pub mod mem; +pub mod thread; +pub mod tls; +#[macro_use] +pub mod usercalls; + +#[cfg(not(test))] +global_asm!(include_str!("entry.S"), options(att_syntax)); + +#[repr(C)] +struct EntryReturn(u64, u64); + +#[cfg(not(test))] +#[no_mangle] +unsafe extern "C" fn tcs_init(secondary: bool) { + // Be very careful when changing this code: it runs before the binary has been + // relocated. Any indirect accesses to symbols will likely fail. + const UNINIT: usize = 0; + const BUSY: usize = 1; + const DONE: usize = 2; + // Three-state spin-lock + static RELOC_STATE: AtomicUsize = AtomicUsize::new(UNINIT); + + if secondary && RELOC_STATE.load(Ordering::Relaxed) != DONE { + rtabort!("Entered secondary TCS before main TCS!") + } + + // Try to atomically swap UNINIT with BUSY. The returned state can be: + match RELOC_STATE.compare_exchange(UNINIT, BUSY, Ordering::Acquire, Ordering::Acquire) { + // This thread just obtained the lock and other threads will observe BUSY + Ok(_) => { + reloc::relocate_elf_rela(); + RELOC_STATE.store(DONE, Ordering::Release); + } + // We need to wait until the initialization is done. + Err(BUSY) => { + while RELOC_STATE.load(Ordering::Acquire) == BUSY { + core::hint::spin_loop(); + } + } + // Initialization is done. 
+ Err(DONE) => {} + _ => unreachable!(), + } +} + +// FIXME: this item should only exist if this is linked into an executable +// (main function exists). If this is a library, the crate author should be +// able to specify this +#[cfg(not(test))] +#[no_mangle] +extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> EntryReturn { + // FIXME: how to support TLS in library mode? + let tls = Box::new(tls::Tls::new()); + let tls_guard = unsafe { tls.activate() }; + + if secondary { + let join_notifier = super::thread::Thread::entry(); + drop(tls_guard); + drop(join_notifier); + + EntryReturn(0, 0) + } else { + extern "C" { + fn main(argc: isize, argv: *const *const u8) -> isize; + } + + // check entry is being called according to ABI + rtassert!(p3 == 0); + rtassert!(p4 == 0); + rtassert!(p5 == 0); + + unsafe { + // The actual types of these arguments are `p1: *const Arg, p2: + // usize`. We can't currently customize the argument list of Rust's + // main function, so we pass these in as the standard pointer-sized + // values in `argc` and `argv`. + let ret = main(p2 as _, p1 as _); + exit_with_code(ret) + } + } +} + +pub(super) fn exit_with_code(code: isize) -> ! { + if code != 0 { + if let Some(mut out) = panic::SgxPanicOutput::new() { + let _ = write!(out, "Exited with status code {code}"); + } + } + usercalls::exit(code != 0); +} + +#[cfg(not(test))] +#[no_mangle] +extern "C" fn abort_reentry() -> ! 
{ + usercalls::exit(false) +} diff --git a/library/std/src/sys/sgx/abi/panic.rs b/library/std/src/sys/sgx/abi/panic.rs new file mode 100644 index 000000000..229b3b329 --- /dev/null +++ b/library/std/src/sys/sgx/abi/panic.rs @@ -0,0 +1,42 @@ +use super::usercalls::alloc::UserRef; +use crate::cmp; +use crate::io::{self, Write}; +use crate::mem; + +extern "C" { + fn take_debug_panic_buf_ptr() -> *mut u8; + static DEBUG: u8; +} + +pub(crate) struct SgxPanicOutput(Option<&'static mut UserRef<[u8]>>); + +fn empty_user_slice() -> &'static mut UserRef<[u8]> { + unsafe { UserRef::from_raw_parts_mut(1 as *mut u8, 0) } +} + +impl SgxPanicOutput { + pub(crate) fn new() -> Option<Self> { + if unsafe { DEBUG == 0 } { None } else { Some(SgxPanicOutput(None)) } + } + + fn init(&mut self) -> &mut &'static mut UserRef<[u8]> { + self.0.get_or_insert_with(|| unsafe { + let ptr = take_debug_panic_buf_ptr(); + if ptr.is_null() { empty_user_slice() } else { UserRef::from_raw_parts_mut(ptr, 1024) } + }) + } +} + +impl Write for SgxPanicOutput { + fn write(&mut self, src: &[u8]) -> io::Result<usize> { + let dst = mem::replace(self.init(), empty_user_slice()); + let written = cmp::min(src.len(), dst.len()); + dst[..written].copy_from_enclave(&src[..written]); + self.0 = Some(&mut dst[written..]); + Ok(written) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} diff --git a/library/std/src/sys/sgx/abi/reloc.rs b/library/std/src/sys/sgx/abi/reloc.rs new file mode 100644 index 000000000..02dff0ad2 --- /dev/null +++ b/library/std/src/sys/sgx/abi/reloc.rs @@ -0,0 +1,32 @@ +use super::mem; +use crate::slice::from_raw_parts; + +const R_X86_64_RELATIVE: u32 = 8; + +#[repr(packed)] +struct Rela<T> { + offset: T, + info: T, + addend: T, +} + +pub fn relocate_elf_rela() { + extern "C" { + static RELA: u64; + static RELACOUNT: usize; + } + + if unsafe { RELACOUNT } == 0 { + return; + } // unsafe ok: link-time constant + + let relas = unsafe { + 
from_raw_parts::<Rela<u64>>(mem::rel_ptr(RELA), RELACOUNT) // unsafe ok: link-time constant + }; + for rela in relas { + if rela.info != (/*0 << 32 |*/R_X86_64_RELATIVE as u64) { + rtabort!("Invalid relocation"); + } + unsafe { *mem::rel_ptr_mut::<*const ()>(rela.offset) = mem::rel_ptr(rela.addend) }; + } +} diff --git a/library/std/src/sys/sgx/abi/thread.rs b/library/std/src/sys/sgx/abi/thread.rs new file mode 100644 index 000000000..ef55b821a --- /dev/null +++ b/library/std/src/sys/sgx/abi/thread.rs @@ -0,0 +1,13 @@ +use fortanix_sgx_abi::Tcs; + +/// Gets the ID for the current thread. The ID is guaranteed to be unique among +/// all currently running threads in the enclave, and it is guaranteed to be +/// constant for the lifetime of the thread. More specifically for SGX, there +/// is a one-to-one correspondence of the ID to the address of the TCS. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn current() -> Tcs { + extern "C" { + fn get_tcs_addr() -> Tcs; + } + unsafe { get_tcs_addr() } +} diff --git a/library/std/src/sys/sgx/abi/tls/mod.rs b/library/std/src/sys/sgx/abi/tls/mod.rs new file mode 100644 index 000000000..13d96e9a6 --- /dev/null +++ b/library/std/src/sys/sgx/abi/tls/mod.rs @@ -0,0 +1,132 @@ +mod sync_bitset; + +use self::sync_bitset::*; +use crate::cell::Cell; +use crate::mem; +use crate::num::NonZeroUsize; +use crate::ptr; +use crate::sync::atomic::{AtomicUsize, Ordering}; + +#[cfg(target_pointer_width = "64")] +const USIZE_BITS: usize = 64; +const TLS_KEYS: usize = 128; // Same as POSIX minimum +const TLS_KEYS_BITSET_SIZE: usize = (TLS_KEYS + (USIZE_BITS - 1)) / USIZE_BITS; + +#[cfg_attr(test, linkage = "available_externally")] +#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_KEY_IN_USEE"] +static TLS_KEY_IN_USE: SyncBitset = SYNC_BITSET_INIT; +macro_rules! 
dup { + ((* $($exp:tt)*) $($val:tt)*) => (dup!( ($($exp)*) $($val)* $($val)* )); + (() $($val:tt)*) => ([$($val),*]) +} +#[cfg_attr(test, linkage = "available_externally")] +#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_DESTRUCTORE"] +static TLS_DESTRUCTOR: [AtomicUsize; TLS_KEYS] = dup!((* * * * * * *) (AtomicUsize::new(0))); + +extern "C" { + fn get_tls_ptr() -> *const u8; + fn set_tls_ptr(tls: *const u8); +} + +#[derive(Copy, Clone)] +#[repr(C)] +pub struct Key(NonZeroUsize); + +impl Key { + fn to_index(self) -> usize { + self.0.get() - 1 + } + + fn from_index(index: usize) -> Self { + Key(NonZeroUsize::new(index + 1).unwrap()) + } + + pub fn as_usize(self) -> usize { + self.0.get() + } + + pub fn from_usize(index: usize) -> Self { + Key(NonZeroUsize::new(index).unwrap()) + } +} + +#[repr(C)] +pub struct Tls { + data: [Cell<*mut u8>; TLS_KEYS], +} + +pub struct ActiveTls<'a> { + tls: &'a Tls, +} + +impl<'a> Drop for ActiveTls<'a> { + fn drop(&mut self) { + let value_with_destructor = |key: usize| { + let ptr = TLS_DESTRUCTOR[key].load(Ordering::Relaxed); + unsafe { mem::transmute::<_, Option<unsafe extern "C" fn(*mut u8)>>(ptr) } + .map(|dtor| (&self.tls.data[key], dtor)) + }; + + let mut any_non_null_dtor = true; + while any_non_null_dtor { + any_non_null_dtor = false; + for (value, dtor) in TLS_KEY_IN_USE.iter().filter_map(&value_with_destructor) { + let value = value.replace(ptr::null_mut()); + if !value.is_null() { + any_non_null_dtor = true; + unsafe { dtor(value) } + } + } + } + } +} + +impl Tls { + pub fn new() -> Tls { + Tls { data: dup!((* * * * * * *) (Cell::new(ptr::null_mut()))) } + } + + pub unsafe fn activate(&self) -> ActiveTls<'_> { + // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition. + unsafe { set_tls_ptr(self as *const Tls as _) }; + ActiveTls { tls: self } + } + + #[allow(unused)] + pub unsafe fn activate_persistent(self: Box<Self>) { + // FIXME: Needs safety information. 
See entry.S for `set_tls_ptr` definition. + unsafe { set_tls_ptr((&*self) as *const Tls as _) }; + mem::forget(self); + } + + unsafe fn current<'a>() -> &'a Tls { + // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition. + unsafe { &*(get_tls_ptr() as *const Tls) } + } + + pub fn create(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key { + let index = if let Some(index) = TLS_KEY_IN_USE.set() { + index + } else { + rtabort!("TLS limit exceeded") + }; + TLS_DESTRUCTOR[index].store(dtor.map_or(0, |f| f as usize), Ordering::Relaxed); + Key::from_index(index) + } + + pub fn set(key: Key, value: *mut u8) { + let index = key.to_index(); + rtassert!(TLS_KEY_IN_USE.get(index)); + unsafe { Self::current() }.data[index].set(value); + } + + pub fn get(key: Key) -> *mut u8 { + let index = key.to_index(); + rtassert!(TLS_KEY_IN_USE.get(index)); + unsafe { Self::current() }.data[index].get() + } + + pub fn destroy(key: Key) { + TLS_KEY_IN_USE.clear(key.to_index()); + } +} diff --git a/library/std/src/sys/sgx/abi/tls/sync_bitset.rs b/library/std/src/sys/sgx/abi/tls/sync_bitset.rs new file mode 100644 index 000000000..4eeff8f6e --- /dev/null +++ b/library/std/src/sys/sgx/abi/tls/sync_bitset.rs @@ -0,0 +1,85 @@ +#[cfg(test)] +mod tests; + +use super::{TLS_KEYS_BITSET_SIZE, USIZE_BITS}; +use crate::iter::{Enumerate, Peekable}; +use crate::slice::Iter; +use crate::sync::atomic::{AtomicUsize, Ordering}; + +/// A bitset that can be used synchronously. +pub(super) struct SyncBitset([AtomicUsize; TLS_KEYS_BITSET_SIZE]); + +pub(super) const SYNC_BITSET_INIT: SyncBitset = + SyncBitset([AtomicUsize::new(0), AtomicUsize::new(0)]); + +impl SyncBitset { + pub fn get(&self, index: usize) -> bool { + let (hi, lo) = Self::split(index); + (self.0[hi].load(Ordering::Relaxed) & lo) != 0 + } + + /// Not atomic. 
+ pub fn iter(&self) -> SyncBitsetIter<'_> { + SyncBitsetIter { iter: self.0.iter().enumerate().peekable(), elem_idx: 0 } + } + + pub fn clear(&self, index: usize) { + let (hi, lo) = Self::split(index); + self.0[hi].fetch_and(!lo, Ordering::Relaxed); + } + + /// Sets any unset bit. Not atomic. Returns `None` if all bits were + /// observed to be set. + pub fn set(&self) -> Option<usize> { + 'elems: for (idx, elem) in self.0.iter().enumerate() { + let mut current = elem.load(Ordering::Relaxed); + loop { + if 0 == !current { + continue 'elems; + } + let trailing_ones = (!current).trailing_zeros() as usize; + match elem.compare_exchange( + current, + current | (1 << trailing_ones), + Ordering::AcqRel, + Ordering::Relaxed, + ) { + Ok(_) => return Some(idx * USIZE_BITS + trailing_ones), + Err(previous) => current = previous, + } + } + } + None + } + + fn split(index: usize) -> (usize, usize) { + (index / USIZE_BITS, 1 << (index % USIZE_BITS)) + } +} + +pub(super) struct SyncBitsetIter<'a> { + iter: Peekable<Enumerate<Iter<'a, AtomicUsize>>>, + elem_idx: usize, +} + +impl<'a> Iterator for SyncBitsetIter<'a> { + type Item = usize; + + fn next(&mut self) -> Option<usize> { + self.iter.peek().cloned().and_then(|(idx, elem)| { + let elem = elem.load(Ordering::Relaxed); + let low_mask = (1 << self.elem_idx) - 1; + let next = elem & !low_mask; + let next_idx = next.trailing_zeros() as usize; + self.elem_idx = next_idx + 1; + if self.elem_idx >= 64 { + self.elem_idx = 0; + self.iter.next(); + } + match next_idx { + 64 => self.next(), + _ => Some(idx * USIZE_BITS + next_idx), + } + }) + } +} diff --git a/library/std/src/sys/sgx/abi/tls/sync_bitset/tests.rs b/library/std/src/sys/sgx/abi/tls/sync_bitset/tests.rs new file mode 100644 index 000000000..d7eb2e139 --- /dev/null +++ b/library/std/src/sys/sgx/abi/tls/sync_bitset/tests.rs @@ -0,0 +1,25 @@ +use super::*; + +fn test_data(bitset: [usize; 2], bit_indices: &[usize]) { + let set = SyncBitset([AtomicUsize::new(bitset[0]), 
AtomicUsize::new(bitset[1])]); + assert_eq!(set.iter().collect::<Vec<_>>(), bit_indices); + for &i in bit_indices { + assert!(set.get(i)); + } +} + +#[test] +fn iter() { + test_data([0b0110_1001, 0], &[0, 3, 5, 6]); + test_data([0x8000_0000_0000_0000, 0x8000_0000_0000_0001], &[63, 64, 127]); + test_data([0, 0], &[]); +} + +#[test] +fn set_get_clear() { + let set = SYNC_BITSET_INIT; + let key = set.set().unwrap(); + assert!(set.get(key)); + set.clear(key); + assert!(!set.get(key)); +} diff --git a/library/std/src/sys/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/sgx/abi/usercalls/alloc.rs new file mode 100644 index 000000000..ea24fedd0 --- /dev/null +++ b/library/std/src/sys/sgx/abi/usercalls/alloc.rs @@ -0,0 +1,732 @@ +#![allow(unused)] + +use crate::arch::asm; +use crate::cell::UnsafeCell; +use crate::cmp; +use crate::convert::TryInto; +use crate::mem; +use crate::ops::{CoerceUnsized, Deref, DerefMut, Index, IndexMut}; +use crate::ptr::{self, NonNull}; +use crate::slice; +use crate::slice::SliceIndex; + +use super::super::mem::{is_enclave_range, is_user_range}; +use fortanix_sgx_abi::*; + +/// A type that can be safely read from or written to userspace. +/// +/// Non-exhaustive list of specific requirements for reading and writing: +/// * **Type is `Copy`** (and therefore also not `Drop`). Copies will be +/// created when copying from/to userspace. Destructors will not be called. +/// * **No references or Rust-style owned pointers** (`Vec`, `Arc`, etc.). When +/// reading from userspace, references into enclave memory must not be +/// created. Also, only enclave memory is considered managed by the Rust +/// compiler's static analysis. When reading from userspace, there can be no +/// guarantee that the value correctly adheres to the expectations of the +/// type. When writing to userspace, memory addresses of data in enclave +/// memory must not be leaked for confidentiality reasons. `User` and +/// `UserRef` are also not allowed for the same reasons. 
+/// * **No fat pointers.** When reading from userspace, the size or vtable +/// pointer could be automatically interpreted and used by the code. When +/// writing to userspace, memory addresses of data in enclave memory (such +/// as vtable pointers) must not be leaked for confidentiality reasons. +/// +/// Non-exhaustive list of specific requirements for reading from userspace: +/// * **Any bit pattern is valid** for this type (no `enum`s). There can be no +/// guarantee that the value correctly adheres to the expectations of the +/// type, so any value must be valid for this type. +/// +/// Non-exhaustive list of specific requirements for writing to userspace: +/// * **No pointers to enclave memory.** Memory addresses of data in enclave +/// memory must not be leaked for confidentiality reasons. +/// * **No internal padding.** Padding might contain previously-initialized +/// secret data stored at that memory location and must not be leaked for +/// confidentiality reasons. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub unsafe trait UserSafeSized: Copy + Sized {} + +#[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl UserSafeSized for u8 {} +#[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl<T> UserSafeSized for FifoDescriptor<T> {} +#[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl UserSafeSized for ByteBuffer {} +#[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl UserSafeSized for Usercall {} +#[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl UserSafeSized for Return {} +#[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl<T: UserSafeSized> UserSafeSized for [T; 2] {} + +/// A type that can be represented in memory as one or more `UserSafeSized`s. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub unsafe trait UserSafe { + /// Equivalent to `mem::align_of::<Self>`. 
+ fn align_of() -> usize; + + /// Construct a pointer to `Self` given a memory range in user space. + /// + /// N.B., this takes a size, not a length! + /// + /// # Safety + /// + /// The caller must ensure the memory range is in user memory, is the + /// correct size and is correctly aligned and points to the right type. + unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self; + + /// Construct a pointer to `Self` given a memory range. + /// + /// N.B., this takes a size, not a length! + /// + /// # Safety + /// + /// The caller must ensure the memory range points to the correct type. + /// + /// # Panics + /// + /// This function panics if: + /// + /// * the pointer is not aligned. + /// * the pointer is null. + /// * the pointed-to range does not fit in the address space. + /// * the pointed-to range is not in user memory. + unsafe fn from_raw_sized(ptr: *mut u8, size: usize) -> NonNull<Self> { + assert!(ptr.wrapping_add(size) >= ptr); + // SAFETY: The caller has guaranteed the pointer is valid + let ret = unsafe { Self::from_raw_sized_unchecked(ptr, size) }; + unsafe { + Self::check_ptr(ret); + NonNull::new_unchecked(ret as _) + } + } + + /// Checks if a pointer may point to `Self` in user memory. + /// + /// # Safety + /// + /// The caller must ensure the memory range points to the correct type and + /// length (if this is a slice). + /// + /// # Panics + /// + /// This function panics if: + /// + /// * the pointer is not aligned. + /// * the pointer is null. + /// * the pointed-to range is not in user memory. 
+    unsafe fn check_ptr(ptr: *const Self) {
+        // `align_of()` is a power of two, so masking with `align - 1` isolates
+        // any misaligned low bits.
+        let is_aligned = |p| -> bool { 0 == (p as usize) & (Self::align_of() - 1) };
+
+        assert!(is_aligned(ptr as *const u8));
+        // NOTE(review): `&*ptr` is materialized for `size_of_val` before the
+        // null assertion below runs, so a null pointer is dereferenced (as a
+        // place expression) prior to being rejected — confirm this ordering is
+        // intentional.
+        assert!(is_user_range(ptr as _, mem::size_of_val(unsafe { &*ptr })));
+        assert!(!ptr.is_null());
+    }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+unsafe impl<T: UserSafeSized> UserSafe for T {
+    fn align_of() -> usize {
+        mem::align_of::<T>()
+    }
+
+    // For a sized type the requested byte size must match the type exactly.
+    unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self {
+        assert_eq!(size, mem::size_of::<T>());
+        ptr as _
+    }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+unsafe impl<T: UserSafeSized> UserSafe for [T] {
+    fn align_of() -> usize {
+        mem::align_of::<T>()
+    }
+
+    /// # Safety
+    /// Behavior is undefined if any of these conditions are violated:
+    /// * `ptr` must be [valid] for writes of `size` many bytes, and it must be
+    ///   properly aligned.
+    ///
+    /// [valid]: core::ptr#safety
+    /// # Panics
+    ///
+    /// This function panics if:
+    ///
+    /// * the element size is not a factor of the size
+    unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self {
+        // The byte size must cover a whole number of elements; the resulting
+        // fat pointer's length is the element count, not the byte count.
+        let elem_size = mem::size_of::<T>();
+        assert_eq!(size % elem_size, 0);
+        let len = size / elem_size;
+        // SAFETY: The caller must uphold the safety contract for `from_raw_sized_unchecked`
+        unsafe { slice::from_raw_parts_mut(ptr as _, len) }
+    }
+}
+
+/// A reference to some type in userspace memory. `&UserRef<T>` is equivalent
+/// to `&T` in enclave memory. Access to the memory is only allowed by copying
+/// to avoid TOCTTOU issues. After copying, code should make sure to completely
+/// check the value before use.
+///
+/// It is also possible to obtain a mutable reference `&mut UserRef<T>`. Unlike
+/// regular mutable references, these are not exclusive. Userspace may always
+/// write to the backing memory at any time, so it can't be assumed that
+/// the pointed-to memory is uniquely borrowed. 
The two different reference types +/// are used solely to indicate intent: a mutable reference is for writing to +/// user memory, an immutable reference for reading from user memory. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub struct UserRef<T: ?Sized>(UnsafeCell<T>); +/// An owned type in userspace memory. `User<T>` is equivalent to `Box<T>` in +/// enclave memory. Access to the memory is only allowed by copying to avoid +/// TOCTTOU issues. The user memory will be freed when the value is dropped. +/// After copying, code should make sure to completely check the value before +/// use. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub struct User<T: UserSafe + ?Sized>(NonNull<UserRef<T>>); + +trait NewUserRef<T: ?Sized> { + unsafe fn new_userref(v: T) -> Self; +} + +impl<T: ?Sized> NewUserRef<*mut T> for NonNull<UserRef<T>> { + unsafe fn new_userref(v: *mut T) -> Self { + // SAFETY: The caller has guaranteed the pointer is valid + unsafe { NonNull::new_unchecked(v as _) } + } +} + +impl<T: ?Sized> NewUserRef<NonNull<T>> for NonNull<UserRef<T>> { + unsafe fn new_userref(v: NonNull<T>) -> Self { + // SAFETY: The caller has guaranteed the pointer is valid + unsafe { NonNull::new_userref(v.as_ptr()) } + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T: ?Sized> User<T> +where + T: UserSafe, +{ + // This function returns memory that is practically uninitialized, but is + // not considered "unspecified" or "undefined" for purposes of an + // optimizing compiler. This is achieved by returning a pointer from + // from outside as obtained by `super::alloc`. + fn new_uninit_bytes(size: usize) -> Self { + unsafe { + // Mustn't call alloc with size 0. 
+ let ptr = if size > 0 { + // `copy_to_userspace` is more efficient when data is 8-byte aligned + let alignment = cmp::max(T::align_of(), 8); + rtunwrap!(Ok, super::alloc(size, alignment)) as _ + } else { + T::align_of() as _ // dangling pointer ok for size 0 + }; + if let Ok(v) = crate::panic::catch_unwind(|| T::from_raw_sized(ptr, size)) { + User(NonNull::new_userref(v)) + } else { + rtabort!("Got invalid pointer from alloc() usercall") + } + } + } + + /// Copies `val` into freshly allocated space in user memory. + pub fn new_from_enclave(val: &T) -> Self { + unsafe { + let mut user = Self::new_uninit_bytes(mem::size_of_val(val)); + user.copy_from_enclave(val); + user + } + } + + /// Creates an owned `User<T>` from a raw pointer. + /// + /// # Safety + /// The caller must ensure `ptr` points to `T`, is freeable with the `free` + /// usercall and the alignment of `T`, and is uniquely owned. + /// + /// # Panics + /// This function panics if: + /// + /// * The pointer is not aligned + /// * The pointer is null + /// * The pointed-to range is not in user memory + pub unsafe fn from_raw(ptr: *mut T) -> Self { + // SAFETY: the caller must uphold the safety contract for `from_raw`. + unsafe { T::check_ptr(ptr) }; + User(unsafe { NonNull::new_userref(ptr) }) + } + + /// Converts this value into a raw pointer. The value will no longer be + /// automatically freed. + pub fn into_raw(self) -> *mut T { + let ret = self.0; + mem::forget(self); + ret.as_ptr() as _ + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T> User<T> +where + T: UserSafe, +{ + /// Allocate space for `T` in user memory. + pub fn uninitialized() -> Self { + Self::new_uninit_bytes(mem::size_of::<T>()) + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T> User<[T]> +where + [T]: UserSafe, +{ + /// Allocate space for a `[T]` of `n` elements in user memory. 
+ pub fn uninitialized(n: usize) -> Self { + Self::new_uninit_bytes(n * mem::size_of::<T>()) + } + + /// Creates an owned `User<[T]>` from a raw thin pointer and a slice length. + /// + /// # Safety + /// The caller must ensure `ptr` points to `len` elements of `T`, is + /// freeable with the `free` usercall and the alignment of `T`, and is + /// uniquely owned. + /// + /// # Panics + /// This function panics if: + /// + /// * The pointer is not aligned + /// * The pointer is null + /// * The pointed-to range does not fit in the address space + /// * The pointed-to range is not in user memory + pub unsafe fn from_raw_parts(ptr: *mut T, len: usize) -> Self { + User(unsafe { + NonNull::new_userref(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>())) + }) + } +} + +/// Copies `len` bytes of data from enclave pointer `src` to userspace `dst` +/// +/// This function mitigates stale data vulnerabilities by ensuring all writes to untrusted memory are either: +/// - preceded by the VERW instruction and followed by the MFENCE; LFENCE instruction sequence +/// - or are in multiples of 8 bytes, aligned to an 8-byte boundary +/// +/// # Panics +/// This function panics if: +/// +/// * The `src` pointer is null +/// * The `dst` pointer is null +/// * The `src` memory range is not in enclave memory +/// * The `dst` memory range is not in user memory +/// +/// # References +/// - https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00615.html +/// - https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/processor-mmio-stale-data-vulnerabilities.html#inpage-nav-3-2-2 +pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize) { + unsafe fn copy_bytewise_to_userspace(src: *const u8, dst: *mut u8, len: usize) { + unsafe { + let mut seg_sel: u16 = 0; + for off in 0..len { + asm!(" + mov %ds, ({seg_sel}) + verw ({seg_sel}) + movb {val}, ({dst}) + mfence + lfence + ", + 
val = in(reg_byte) *src.offset(off as isize),
+                    dst = in(reg) dst.offset(off as isize),
+                    seg_sel = in(reg) &mut seg_sel,
+                    options(nostack, att_syntax)
+                );
+            }
+        }
+    }
+
+    // Writes in whole, 8-byte-aligned quadwords via `rep movsq`. Per the
+    // mitigation description above, such stores do not need the
+    // VERW + MFENCE;LFENCE sequence used by the bytewise path.
+    unsafe fn copy_aligned_quadwords_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
+        unsafe {
+            asm!(
+                "rep movsq (%rsi), (%rdi)",
+                inout("rcx") len / 8 => _,
+                inout("rdi") dst => _,
+                inout("rsi") src => _,
+                options(att_syntax, nostack, preserves_flags)
+            );
+        }
+    }
+    // Validate the ranges before touching anything: non-null, on the correct
+    // side of the enclave boundary, and no wrap-around of the address space.
+    assert!(!src.is_null());
+    assert!(!dst.is_null());
+    assert!(is_enclave_range(src, len));
+    assert!(is_user_range(dst, len));
+    assert!(len < isize::MAX as usize);
+    assert!(!(src as usize).overflowing_add(len).1);
+    assert!(!(dst as usize).overflowing_add(len).1);
+
+    if len < 8 {
+        // Can't align on 8 byte boundary: copy safely byte per byte
+        unsafe {
+            copy_bytewise_to_userspace(src, dst, len);
+        }
+    } else if len % 8 == 0 && dst as usize % 8 == 0 {
+        // Copying 8-byte aligned quadwords: copy quad word per quad word
+        unsafe {
+            copy_aligned_quadwords_to_userspace(src, dst, len);
+        }
+    } else {
+        // Split copies into three parts:
+        // +--------+
+        // | small0 | Chunk smaller than 8 bytes
+        // +--------+
+        // | big    | Chunk 8-byte aligned, and size a multiple of 8 bytes
+        // +--------+
+        // | small1 | Chunk smaller than 8 bytes
+        // +--------+
+
+        unsafe {
+            // Copy small0
+            // NOTE(review): when `dst` is already 8-byte aligned this yields
+            // `small0_size == 8` (not 0), so the first 8 bytes are copied
+            // bytewise; appears correct, but confirm that is intended.
+            let small0_size = (8 - dst as usize % 8) as u8;
+            let small0_src = src;
+            let small0_dst = dst;
+            copy_bytewise_to_userspace(small0_src as _, small0_dst, small0_size as _);
+
+            // Copy big
+            let small1_size = ((len - small0_size as usize) % 8) as u8;
+            let big_size = len - small0_size as usize - small1_size as usize;
+            let big_src = src.offset(small0_size as _);
+            let big_dst = dst.offset(small0_size as _);
+            copy_aligned_quadwords_to_userspace(big_src as _, big_dst, big_size);
+
+            // Copy small1
+            let small1_src = src.offset(big_size as isize + small0_size as isize);
+            let small1_dst = dst.offset(big_size as isize + small0_size as isize);
+
copy_bytewise_to_userspace(small1_src, small1_dst, small1_size as _); + } + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T: ?Sized> UserRef<T> +where + T: UserSafe, +{ + /// Creates a `&UserRef<[T]>` from a raw pointer. + /// + /// # Safety + /// The caller must ensure `ptr` points to `T`. + /// + /// # Panics + /// This function panics if: + /// + /// * The pointer is not aligned + /// * The pointer is null + /// * The pointed-to range is not in user memory + pub unsafe fn from_ptr<'a>(ptr: *const T) -> &'a Self { + // SAFETY: The caller must uphold the safety contract for `from_ptr`. + unsafe { T::check_ptr(ptr) }; + unsafe { &*(ptr as *const Self) } + } + + /// Creates a `&mut UserRef<[T]>` from a raw pointer. See the struct + /// documentation for the nuances regarding a `&mut UserRef<T>`. + /// + /// # Safety + /// The caller must ensure `ptr` points to `T`. + /// + /// # Panics + /// This function panics if: + /// + /// * The pointer is not aligned + /// * The pointer is null + /// * The pointed-to range is not in user memory + pub unsafe fn from_mut_ptr<'a>(ptr: *mut T) -> &'a mut Self { + // SAFETY: The caller must uphold the safety contract for `from_mut_ptr`. + unsafe { T::check_ptr(ptr) }; + unsafe { &mut *(ptr as *mut Self) } + } + + /// Copies `val` into user memory. + /// + /// # Panics + /// This function panics if the destination doesn't have the same size as + /// the source. This can happen for dynamically-sized types such as slices. + pub fn copy_from_enclave(&mut self, val: &T) { + unsafe { + assert_eq!(mem::size_of_val(val), mem::size_of_val(&*self.0.get())); + copy_to_userspace( + val as *const T as *const u8, + self.0.get() as *mut T as *mut u8, + mem::size_of_val(val), + ); + } + } + + /// Copies the value from user memory and place it into `dest`. + /// + /// # Panics + /// This function panics if the destination doesn't have the same size as + /// the source. 
This can happen for dynamically-sized types such as slices. + pub fn copy_to_enclave(&self, dest: &mut T) { + unsafe { + assert_eq!(mem::size_of_val(dest), mem::size_of_val(&*self.0.get())); + ptr::copy( + self.0.get() as *const T as *const u8, + dest as *mut T as *mut u8, + mem::size_of_val(dest), + ); + } + } + + /// Obtain a raw pointer from this reference. + pub fn as_raw_ptr(&self) -> *const T { + self as *const _ as _ + } + + /// Obtain a raw pointer from this reference. + pub fn as_raw_mut_ptr(&mut self) -> *mut T { + self as *mut _ as _ + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T> UserRef<T> +where + T: UserSafe, +{ + /// Copies the value from user memory into enclave memory. + pub fn to_enclave(&self) -> T { + unsafe { ptr::read(self.0.get()) } + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T> UserRef<[T]> +where + [T]: UserSafe, +{ + /// Creates a `&UserRef<[T]>` from a raw thin pointer and a slice length. + /// + /// # Safety + /// The caller must ensure `ptr` points to `n` elements of `T`. + /// + /// # Panics + /// This function panics if: + /// + /// * The pointer is not aligned + /// * The pointer is null + /// * The pointed-to range does not fit in the address space + /// * The pointed-to range is not in user memory + pub unsafe fn from_raw_parts<'a>(ptr: *const T, len: usize) -> &'a Self { + // SAFETY: The caller must uphold the safety contract for `from_raw_parts`. + unsafe { + &*(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>()).as_ptr() as *const Self) + } + } + + /// Creates a `&mut UserRef<[T]>` from a raw thin pointer and a slice length. + /// See the struct documentation for the nuances regarding a + /// `&mut UserRef<T>`. + /// + /// # Safety + /// The caller must ensure `ptr` points to `n` elements of `T`. 
+ /// + /// # Panics + /// This function panics if: + /// + /// * The pointer is not aligned + /// * The pointer is null + /// * The pointed-to range does not fit in the address space + /// * The pointed-to range is not in user memory + pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut T, len: usize) -> &'a mut Self { + // SAFETY: The caller must uphold the safety contract for `from_raw_parts_mut`. + unsafe { + &mut *(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>()).as_ptr() as *mut Self) + } + } + + /// Obtain a raw pointer to the first element of this user slice. + pub fn as_ptr(&self) -> *const T { + self.0.get() as _ + } + + /// Obtain a raw pointer to the first element of this user slice. + pub fn as_mut_ptr(&mut self) -> *mut T { + self.0.get() as _ + } + + /// Obtain the number of elements in this user slice. + pub fn len(&self) -> usize { + unsafe { (*self.0.get()).len() } + } + + /// Copies the value from user memory and place it into `dest`. Afterwards, + /// `dest` will contain exactly `self.len()` elements. + /// + /// # Panics + /// This function panics if the destination doesn't have the same size as + /// the source. This can happen for dynamically-sized types such as slices. + pub fn copy_to_enclave_vec(&self, dest: &mut Vec<T>) { + if let Some(missing) = self.len().checked_sub(dest.capacity()) { + dest.reserve(missing) + } + // SAFETY: We reserve enough space above. + unsafe { dest.set_len(self.len()) }; + self.copy_to_enclave(&mut dest[..]); + } + + /// Copies the value from user memory into a vector in enclave memory. + pub fn to_enclave(&self) -> Vec<T> { + let mut ret = Vec::with_capacity(self.len()); + self.copy_to_enclave_vec(&mut ret); + ret + } + + /// Returns an iterator over the slice. + pub fn iter(&self) -> Iter<'_, T> + where + T: UserSafe, // FIXME: should be implied by [T]: UserSafe? + { + unsafe { Iter((&*self.as_raw_ptr()).iter()) } + } + + /// Returns an iterator that allows modifying each value. 
+ pub fn iter_mut(&mut self) -> IterMut<'_, T> + where + T: UserSafe, // FIXME: should be implied by [T]: UserSafe? + { + unsafe { IterMut((&mut *self.as_raw_mut_ptr()).iter_mut()) } + } +} + +/// Immutable user slice iterator +/// +/// This struct is created by the `iter` method on `UserRef<[T]>`. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub struct Iter<'a, T: 'a + UserSafe>(slice::Iter<'a, T>); + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<'a, T: UserSafe> Iterator for Iter<'a, T> { + type Item = &'a UserRef<T>; + + #[inline] + fn next(&mut self) -> Option<Self::Item> { + unsafe { self.0.next().map(|e| UserRef::from_ptr(e)) } + } +} + +/// Mutable user slice iterator +/// +/// This struct is created by the `iter_mut` method on `UserRef<[T]>`. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub struct IterMut<'a, T: 'a + UserSafe>(slice::IterMut<'a, T>); + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<'a, T: UserSafe> Iterator for IterMut<'a, T> { + type Item = &'a mut UserRef<T>; + + #[inline] + fn next(&mut self) -> Option<Self::Item> { + unsafe { self.0.next().map(|e| UserRef::from_mut_ptr(e)) } + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T: ?Sized> Deref for User<T> +where + T: UserSafe, +{ + type Target = UserRef<T>; + + fn deref(&self) -> &Self::Target { + unsafe { &*self.0.as_ptr() } + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T: ?Sized> DerefMut for User<T> +where + T: UserSafe, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { &mut *self.0.as_ptr() } + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T: ?Sized> Drop for User<T> +where + T: UserSafe, +{ + fn drop(&mut self) { + unsafe { + let ptr = (*self.0.as_ptr()).0.get(); + super::free(ptr as _, mem::size_of_val(&mut *ptr), T::align_of()); + } + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T: CoerceUnsized<U>, U> 
CoerceUnsized<UserRef<U>> for UserRef<T> {}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<T, I> Index<I> for UserRef<[T]>
+where
+    [T]: UserSafe,
+    I: SliceIndex<[T]>,
+    I::Output: UserSafe,
+{
+    type Output = UserRef<I::Output>;
+
+    #[inline]
+    fn index(&self, index: I) -> &UserRef<I::Output> {
+        unsafe {
+            if let Some(slice) = index.get(&*self.as_raw_ptr()) {
+                UserRef::from_ptr(slice)
+            } else {
+                // Out-of-range indexing aborts the enclave rather than
+                // unwinding, matching the runtime-abort style used elsewhere
+                // in this module.
+                rtabort!("index out of range for user slice");
+            }
+        }
+    }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<T, I> IndexMut<I> for UserRef<[T]>
+where
+    [T]: UserSafe,
+    I: SliceIndex<[T]>,
+    I::Output: UserSafe,
+{
+    #[inline]
+    fn index_mut(&mut self, index: I) -> &mut UserRef<I::Output> {
+        unsafe {
+            if let Some(slice) = index.get_mut(&mut *self.as_raw_mut_ptr()) {
+                UserRef::from_mut_ptr(slice)
+            } else {
+                rtabort!("index out of range for user slice");
+            }
+        }
+    }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl UserRef<super::raw::ByteBuffer> {
+    /// Copies the user memory range pointed to by the user `ByteBuffer` to
+    /// enclave memory.
+    ///
+    /// # Panics
+    /// This function panics if, in the user `ByteBuffer`:
+    ///
+    /// * The pointer is null
+    /// * The pointed-to range does not fit in the address space
+    /// * The pointed-to range is not in user memory
+    pub fn copy_user_buffer(&self) -> Vec<u8> {
+        unsafe {
+            // `to_enclave` snapshots the (data, len) pair into enclave memory
+            // so userspace can't change it between the check and the copy.
+            let buf = self.to_enclave();
+            if buf.len > 0 {
+                // The temporary `User` takes ownership of the user allocation,
+                // so it is released via the `free` usercall when dropped.
+                User::from_raw_parts(buf.data as _, buf.len).to_enclave()
+            } else {
+                // Mustn't look at `data` or call `free` if `len` is `0`.
+ Vec::with_capacity(0) + } + } + } +} diff --git a/library/std/src/sys/sgx/abi/usercalls/mod.rs b/library/std/src/sys/sgx/abi/usercalls/mod.rs new file mode 100644 index 000000000..79d1db5e1 --- /dev/null +++ b/library/std/src/sys/sgx/abi/usercalls/mod.rs @@ -0,0 +1,323 @@ +use crate::cmp; +use crate::io::{Error as IoError, ErrorKind, IoSlice, IoSliceMut, Result as IoResult}; +use crate::sys::rand::rdrand64; +use crate::time::{Duration, Instant}; + +pub(crate) mod alloc; +#[macro_use] +pub(crate) mod raw; +#[cfg(test)] +mod tests; + +use self::raw::*; + +/// Usercall `read`. See the ABI documentation for more information. +/// +/// This will do a single `read` usercall and scatter the read data among +/// `bufs`. To read to a single buffer, just pass a slice of length one. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn read(fd: Fd, bufs: &mut [IoSliceMut<'_>]) -> IoResult<usize> { + unsafe { + let total_len = bufs.iter().fold(0usize, |sum, buf| sum.saturating_add(buf.len())); + let mut userbuf = alloc::User::<[u8]>::uninitialized(total_len); + let ret_len = raw::read(fd, userbuf.as_mut_ptr(), userbuf.len()).from_sgx_result()?; + let userbuf = &userbuf[..ret_len]; + let mut index = 0; + for buf in bufs { + let end = cmp::min(index + buf.len(), userbuf.len()); + if let Some(buflen) = end.checked_sub(index) { + userbuf[index..end].copy_to_enclave(&mut buf[..buflen]); + index += buf.len(); + } else { + break; + } + } + Ok(userbuf.len()) + } +} + +/// Usercall `read_alloc`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn read_alloc(fd: Fd) -> IoResult<Vec<u8>> { + unsafe { + let userbuf = ByteBuffer { data: crate::ptr::null_mut(), len: 0 }; + let mut userbuf = alloc::User::new_from_enclave(&userbuf); + raw::read_alloc(fd, userbuf.as_raw_mut_ptr()).from_sgx_result()?; + Ok(userbuf.copy_user_buffer()) + } +} + +/// Usercall `write`. See the ABI documentation for more information. 
+/// +/// This will do a single `write` usercall and gather the written data from +/// `bufs`. To write from a single buffer, just pass a slice of length one. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn write(fd: Fd, bufs: &[IoSlice<'_>]) -> IoResult<usize> { + unsafe { + let total_len = bufs.iter().fold(0usize, |sum, buf| sum.saturating_add(buf.len())); + let mut userbuf = alloc::User::<[u8]>::uninitialized(total_len); + let mut index = 0; + for buf in bufs { + let end = cmp::min(index + buf.len(), userbuf.len()); + if let Some(buflen) = end.checked_sub(index) { + userbuf[index..end].copy_from_enclave(&buf[..buflen]); + index += buf.len(); + } else { + break; + } + } + raw::write(fd, userbuf.as_ptr(), userbuf.len()).from_sgx_result() + } +} + +/// Usercall `flush`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn flush(fd: Fd) -> IoResult<()> { + unsafe { raw::flush(fd).from_sgx_result() } +} + +/// Usercall `close`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn close(fd: Fd) { + unsafe { raw::close(fd) } +} + +fn string_from_bytebuffer(buf: &alloc::UserRef<ByteBuffer>, usercall: &str, arg: &str) -> String { + String::from_utf8(buf.copy_user_buffer()) + .unwrap_or_else(|_| rtabort!("Usercall {usercall}: expected {arg} to be valid UTF-8")) +} + +/// Usercall `bind_stream`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn bind_stream(addr: &str) -> IoResult<(Fd, String)> { + unsafe { + let addr_user = alloc::User::new_from_enclave(addr.as_bytes()); + let mut local = alloc::User::<ByteBuffer>::uninitialized(); + let fd = raw::bind_stream(addr_user.as_ptr(), addr_user.len(), local.as_raw_mut_ptr()) + .from_sgx_result()?; + let local = string_from_bytebuffer(&local, "bind_stream", "local_addr"); + Ok((fd, local)) + } +} + +/// Usercall `accept_stream`. 
See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn accept_stream(fd: Fd) -> IoResult<(Fd, String, String)> { + unsafe { + let mut bufs = alloc::User::<[ByteBuffer; 2]>::uninitialized(); + let mut buf_it = alloc::UserRef::iter_mut(&mut *bufs); // FIXME: can this be done + // without forcing coercion? + let (local, peer) = (buf_it.next().unwrap(), buf_it.next().unwrap()); + let fd = raw::accept_stream(fd, local.as_raw_mut_ptr(), peer.as_raw_mut_ptr()) + .from_sgx_result()?; + let local = string_from_bytebuffer(&local, "accept_stream", "local_addr"); + let peer = string_from_bytebuffer(&peer, "accept_stream", "peer_addr"); + Ok((fd, local, peer)) + } +} + +/// Usercall `connect_stream`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn connect_stream(addr: &str) -> IoResult<(Fd, String, String)> { + unsafe { + let addr_user = alloc::User::new_from_enclave(addr.as_bytes()); + let mut bufs = alloc::User::<[ByteBuffer; 2]>::uninitialized(); + let mut buf_it = alloc::UserRef::iter_mut(&mut *bufs); // FIXME: can this be done + // without forcing coercion? + let (local, peer) = (buf_it.next().unwrap(), buf_it.next().unwrap()); + let fd = raw::connect_stream( + addr_user.as_ptr(), + addr_user.len(), + local.as_raw_mut_ptr(), + peer.as_raw_mut_ptr(), + ) + .from_sgx_result()?; + let local = string_from_bytebuffer(&local, "connect_stream", "local_addr"); + let peer = string_from_bytebuffer(&peer, "connect_stream", "peer_addr"); + Ok((fd, local, peer)) + } +} + +/// Usercall `launch_thread`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub unsafe fn launch_thread() -> IoResult<()> { + // SAFETY: The caller must uphold the safety contract for `launch_thread`. + unsafe { raw::launch_thread().from_sgx_result() } +} + +/// Usercall `exit`. See the ABI documentation for more information. 
+#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn exit(panic: bool) -> ! { + unsafe { raw::exit(panic) } +} + +/// Usercall `wait`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn wait(event_mask: u64, mut timeout: u64) -> IoResult<u64> { + if timeout != WAIT_NO && timeout != WAIT_INDEFINITE { + // We don't want people to rely on accuracy of timeouts to make + // security decisions in an SGX enclave. That's why we add a random + // amount not exceeding +/- 10% to the timeout value to discourage + // people from relying on accuracy of timeouts while providing a way + // to make things work in other cases. Note that in the SGX threat + // model the enclave runner which is serving the wait usercall is not + // trusted to ensure accurate timeouts. + if let Ok(timeout_signed) = i64::try_from(timeout) { + let tenth = timeout_signed / 10; + let deviation = (rdrand64() as i64).checked_rem(tenth).unwrap_or(0); + timeout = timeout_signed.saturating_add(deviation) as _; + } + } + unsafe { raw::wait(event_mask, timeout).from_sgx_result() } +} + +/// This function makes an effort to wait for a non-spurious event at least as +/// long as `duration`. Note that in general there is no guarantee about accuracy +/// of time and timeouts in SGX model. The enclave runner serving usercalls may +/// lie about current time and/or ignore timeout values. +/// +/// Once the event is observed, `should_wake_up` will be used to determine +/// whether or not the event was spurious. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn wait_timeout<F>(event_mask: u64, duration: Duration, should_wake_up: F) +where + F: Fn() -> bool, +{ + // Calls the wait usercall and checks the result. Returns true if event was + // returned, and false if WouldBlock/TimedOut was returned. + // If duration is None, it will use WAIT_NO. 
+    fn wait_checked(event_mask: u64, duration: Option<Duration>) -> bool {
+        // Clamp to `u64::MAX - 1`: `u64::MAX` presumably encodes
+        // `WAIT_INDEFINITE` — TODO confirm against the `raw` constants.
+        let timeout = duration.map_or(raw::WAIT_NO, |duration| {
+            cmp::min((u64::MAX - 1) as u128, duration.as_nanos()) as u64
+        });
+        match wait(event_mask, timeout) {
+            Ok(eventset) => {
+                if event_mask == 0 {
+                    rtabort!("expected wait() to return Err, found Ok.");
+                }
+                // The runner must report at least one event and nothing
+                // outside the mask we asked for.
+                rtassert!(eventset != 0 && eventset & !event_mask == 0);
+                true
+            }
+            Err(e) => {
+                rtassert!(e.kind() == ErrorKind::TimedOut || e.kind() == ErrorKind::WouldBlock);
+                false
+            }
+        }
+    }
+
+    match wait_checked(event_mask, Some(duration)) {
+        false => return,                    // timed out
+        true if should_wake_up() => return, // woken up
+        true => {}                          // spurious event
+    }
+
+    // Drain all cached events.
+    // Note that `event_mask != 0` is implied if we get here.
+    // Passing `None` maps to `WAIT_NO` above, i.e. a non-blocking poll.
+    loop {
+        match wait_checked(event_mask, None) {
+            false => break,                     // no more cached events
+            true if should_wake_up() => return, // woken up
+            true => {}                          // spurious event
+        }
+    }
+
+    // Continue waiting, but take note of time spent waiting so we don't wait
+    // forever. We intentionally don't call `Instant::now()` before this point
+    // to avoid the cost of the `insecure_time` usercall in case there are no
+    // spurious wakeups.
+
+    let start = Instant::now();
+    let mut remaining = duration;
+    loop {
+        match wait_checked(event_mask, Some(remaining)) {
+            false => return,                    // timed out
+            true if should_wake_up() => return, // woken up
+            true => {}                          // spurious event
+        }
+        // Budget exhausted: `checked_sub` returning `None` means the total
+        // `duration` has elapsed, so give up waiting.
+        remaining = match duration.checked_sub(start.elapsed()) {
+            Some(remaining) => remaining,
+            None => break,
+        }
+    }
+}
+
+/// Usercall `send`. See the ABI documentation for more information.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn send(event_set: u64, tcs: Option<Tcs>) -> IoResult<()> {
+    unsafe { raw::send(event_set, tcs).from_sgx_result() }
+}
+
+/// Usercall `insecure_time`. See the ABI documentation for more information.
+#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn insecure_time() -> Duration { + let t = unsafe { raw::insecure_time() }; + Duration::new(t / 1_000_000_000, (t % 1_000_000_000) as _) +} + +/// Usercall `alloc`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn alloc(size: usize, alignment: usize) -> IoResult<*mut u8> { + unsafe { raw::alloc(size, alignment).from_sgx_result() } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +#[doc(inline)] +pub use self::raw::free; + +fn check_os_error(err: Result) -> i32 { + // FIXME: not sure how to make sure all variants of Error are covered + if err == Error::NotFound as _ + || err == Error::PermissionDenied as _ + || err == Error::ConnectionRefused as _ + || err == Error::ConnectionReset as _ + || err == Error::ConnectionAborted as _ + || err == Error::NotConnected as _ + || err == Error::AddrInUse as _ + || err == Error::AddrNotAvailable as _ + || err == Error::BrokenPipe as _ + || err == Error::AlreadyExists as _ + || err == Error::WouldBlock as _ + || err == Error::InvalidInput as _ + || err == Error::InvalidData as _ + || err == Error::TimedOut as _ + || err == Error::WriteZero as _ + || err == Error::Interrupted as _ + || err == Error::Other as _ + || err == Error::UnexpectedEof as _ + || ((Error::UserRangeStart as _)..=(Error::UserRangeEnd as _)).contains(&err) + { + err + } else { + rtabort!("Usercall: returned invalid error value {err}") + } +} + +trait FromSgxResult { + type Return; + + fn from_sgx_result(self) -> IoResult<Self::Return>; +} + +impl<T> FromSgxResult for (Result, T) { + type Return = T; + + fn from_sgx_result(self) -> IoResult<Self::Return> { + if self.0 == RESULT_SUCCESS { + Ok(self.1) + } else { + Err(IoError::from_raw_os_error(check_os_error(self.0))) + } + } +} + +impl FromSgxResult for Result { + type Return = (); + + fn from_sgx_result(self) -> IoResult<Self::Return> { + if self == RESULT_SUCCESS { + Ok(()) + 
} else { + Err(IoError::from_raw_os_error(check_os_error(self))) + } + } +} diff --git a/library/std/src/sys/sgx/abi/usercalls/raw.rs b/library/std/src/sys/sgx/abi/usercalls/raw.rs new file mode 100644 index 000000000..4267b96cc --- /dev/null +++ b/library/std/src/sys/sgx/abi/usercalls/raw.rs @@ -0,0 +1,251 @@ +#![allow(unused)] + +#[unstable(feature = "sgx_platform", issue = "56975")] +pub use fortanix_sgx_abi::*; + +use crate::num::NonZeroU64; +use crate::ptr::NonNull; + +#[repr(C)] +struct UsercallReturn(u64, u64); + +extern "C" { + fn usercall(nr: NonZeroU64, p1: u64, p2: u64, abort: u64, p3: u64, p4: u64) -> UsercallReturn; +} + +/// Performs the raw usercall operation as defined in the ABI calling convention. +/// +/// # Safety +/// +/// The caller must ensure to pass parameters appropriate for the usercall `nr` +/// and to observe all requirements specified in the ABI. +/// +/// # Panics +/// +/// Panics if `nr` is `0`. +#[unstable(feature = "sgx_platform", issue = "56975")] +#[inline] +pub unsafe fn do_usercall( + nr: NonZeroU64, + p1: u64, + p2: u64, + p3: u64, + p4: u64, + abort: bool, +) -> (u64, u64) { + let UsercallReturn(a, b) = unsafe { usercall(nr, p1, p2, abort as _, p3, p4) }; + (a, b) +} + +type Register = u64; + +trait RegisterArgument { + fn from_register(_: Register) -> Self; + fn into_register(self) -> Register; +} + +trait ReturnValue { + fn from_registers(call: &'static str, regs: (Register, Register)) -> Self; +} + +macro_rules! define_usercalls { + ($(fn $f:ident($($n:ident: $t:ty),*) $(-> $r:tt)*; )*) => { + /// Usercall numbers as per the ABI. + #[repr(u64)] + #[unstable(feature = "sgx_platform", issue = "56975")] + #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] + #[allow(missing_docs, non_camel_case_types)] + #[non_exhaustive] + pub enum Usercalls { + #[doc(hidden)] + __enclave_usercalls_invalid = 0, + $($f,)* + } + + $(enclave_usercalls_internal_define_usercalls!(def fn $f($($n: $t),*) $(-> $r)*);)* + }; +} + +macro_rules! 
define_ra { + (< $i:ident > $t:ty) => { + impl<$i> RegisterArgument for $t { + fn from_register(a: Register) -> Self { + a as _ + } + fn into_register(self) -> Register { + self as _ + } + } + }; + ($i:ty as $t:ty) => { + impl RegisterArgument for $t { + fn from_register(a: Register) -> Self { + a as $i as _ + } + fn into_register(self) -> Register { + self as $i as _ + } + } + }; + ($t:ty) => { + impl RegisterArgument for $t { + fn from_register(a: Register) -> Self { + a as _ + } + fn into_register(self) -> Register { + self as _ + } + } + }; +} + +define_ra!(Register); +define_ra!(i64); +define_ra!(u32); +define_ra!(u32 as i32); +define_ra!(u16); +define_ra!(u16 as i16); +define_ra!(u8); +define_ra!(u8 as i8); +define_ra!(usize); +define_ra!(usize as isize); +define_ra!(<T> *const T); +define_ra!(<T> *mut T); + +impl RegisterArgument for bool { + fn from_register(a: Register) -> bool { + if a != 0 { true } else { false } + } + fn into_register(self) -> Register { + self as _ + } +} + +impl<T: RegisterArgument> RegisterArgument for Option<NonNull<T>> { + fn from_register(a: Register) -> Option<NonNull<T>> { + NonNull::new(a as _) + } + fn into_register(self) -> Register { + self.map_or(0 as _, NonNull::as_ptr) as _ + } +} + +impl ReturnValue for ! 
{ + fn from_registers(call: &'static str, _regs: (Register, Register)) -> Self { + rtabort!("Usercall {call}: did not expect to be re-entered"); + } +} + +impl ReturnValue for () { + fn from_registers(call: &'static str, usercall_retval: (Register, Register)) -> Self { + rtassert!(usercall_retval.0 == 0); + rtassert!(usercall_retval.1 == 0); + () + } +} + +impl<T: RegisterArgument> ReturnValue for T { + fn from_registers(call: &'static str, usercall_retval: (Register, Register)) -> Self { + rtassert!(usercall_retval.1 == 0); + T::from_register(usercall_retval.0) + } +} + +impl<T: RegisterArgument, U: RegisterArgument> ReturnValue for (T, U) { + fn from_registers(_call: &'static str, regs: (Register, Register)) -> Self { + (T::from_register(regs.0), U::from_register(regs.1)) + } +} + +macro_rules! return_type_is_abort { + (!) => { + true + }; + ($r:ty) => { + false + }; +} + +// In this macro: using `$r:tt` because `$r:ty` doesn't match ! in `return_type_is_abort` +macro_rules! enclave_usercalls_internal_define_usercalls { + (def fn $f:ident($n1:ident: $t1:ty, $n2:ident: $t2:ty, + $n3:ident: $t3:ty, $n4:ident: $t4:ty) -> $r:tt) => ( + /// This is the raw function definition, see the ABI documentation for + /// more information. + #[unstable(feature = "sgx_platform", issue = "56975")] + #[inline(always)] + pub unsafe fn $f($n1: $t1, $n2: $t2, $n3: $t3, $n4: $t4) -> $r { + ReturnValue::from_registers(stringify!($f), unsafe { do_usercall( + rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + RegisterArgument::into_register($n1), + RegisterArgument::into_register($n2), + RegisterArgument::into_register($n3), + RegisterArgument::into_register($n4), + return_type_is_abort!($r) + ) }) + } + ); + (def fn $f:ident($n1:ident: $t1:ty, $n2:ident: $t2:ty, $n3:ident: $t3:ty) -> $r:tt) => ( + /// This is the raw function definition, see the ABI documentation for + /// more information. 
+ #[unstable(feature = "sgx_platform", issue = "56975")] + #[inline(always)] + pub unsafe fn $f($n1: $t1, $n2: $t2, $n3: $t3) -> $r { + ReturnValue::from_registers(stringify!($f), unsafe { do_usercall( + rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + RegisterArgument::into_register($n1), + RegisterArgument::into_register($n2), + RegisterArgument::into_register($n3), + 0, + return_type_is_abort!($r) + ) }) + } + ); + (def fn $f:ident($n1:ident: $t1:ty, $n2:ident: $t2:ty) -> $r:tt) => ( + /// This is the raw function definition, see the ABI documentation for + /// more information. + #[unstable(feature = "sgx_platform", issue = "56975")] + #[inline(always)] + pub unsafe fn $f($n1: $t1, $n2: $t2) -> $r { + ReturnValue::from_registers(stringify!($f), unsafe { do_usercall( + rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + RegisterArgument::into_register($n1), + RegisterArgument::into_register($n2), + 0,0, + return_type_is_abort!($r) + ) }) + } + ); + (def fn $f:ident($n1:ident: $t1:ty) -> $r:tt) => ( + /// This is the raw function definition, see the ABI documentation for + /// more information. + #[unstable(feature = "sgx_platform", issue = "56975")] + #[inline(always)] + pub unsafe fn $f($n1: $t1) -> $r { + ReturnValue::from_registers(stringify!($f), unsafe { do_usercall( + rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + RegisterArgument::into_register($n1), + 0,0,0, + return_type_is_abort!($r) + ) }) + } + ); + (def fn $f:ident() -> $r:tt) => ( + /// This is the raw function definition, see the ABI documentation for + /// more information. 
+ #[unstable(feature = "sgx_platform", issue = "56975")] + #[inline(always)] + pub unsafe fn $f() -> $r { + ReturnValue::from_registers(stringify!($f), unsafe { do_usercall( + rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + 0,0,0,0, + return_type_is_abort!($r) + ) }) + } + ); + (def fn $f:ident($($n:ident: $t:ty),*)) => ( + enclave_usercalls_internal_define_usercalls!(def fn $f($($n: $t),*) -> ()); + ); +} + +invoke_with_usercalls!(define_usercalls); diff --git a/library/std/src/sys/sgx/abi/usercalls/tests.rs b/library/std/src/sys/sgx/abi/usercalls/tests.rs new file mode 100644 index 000000000..cbf7d7d54 --- /dev/null +++ b/library/std/src/sys/sgx/abi/usercalls/tests.rs @@ -0,0 +1,30 @@ +use super::alloc::copy_to_userspace; +use super::alloc::User; + +#[test] +fn test_copy_function() { + let mut src = [0u8; 100]; + let mut dst = User::<[u8]>::uninitialized(100); + + for i in 0..src.len() { + src[i] = i as _; + } + + for size in 0..48 { + // For all possible alignment + for offset in 0..8 { + // overwrite complete dst + dst.copy_from_enclave(&[0u8; 100]); + + // Copy src[0..size] to dst + offset + unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().offset(offset), size) }; + + // Verify copy + for byte in 0..size { + unsafe { + assert_eq!(*dst.as_ptr().offset(offset + byte as isize), src[byte as usize]); + } + } + } + } +} |