Diffstat (limited to 'compiler/rustc_codegen_gcc/example')
11 files changed, 2087 insertions, 0 deletions
diff --git a/compiler/rustc_codegen_gcc/example/alloc_example.rs b/compiler/rustc_codegen_gcc/example/alloc_example.rs new file mode 100644 index 000000000..74ea7ec4e --- /dev/null +++ b/compiler/rustc_codegen_gcc/example/alloc_example.rs @@ -0,0 +1,41 @@ +#![feature(start, box_syntax, core_intrinsics, alloc_error_handler)] +#![no_std] + +extern crate alloc; +extern crate alloc_system; + +use alloc::boxed::Box; + +use alloc_system::System; + +#[global_allocator] +static ALLOC: System = System; + +#[link(name = "c")] +extern "C" { + fn puts(s: *const u8) -> i32; +} + +#[panic_handler] +fn panic_handler(_: &core::panic::PanicInfo) -> ! { + unsafe { + core::intrinsics::abort(); + } +} + +#[alloc_error_handler] +fn alloc_error_handler(_: alloc::alloc::Layout) -> ! { + unsafe { + core::intrinsics::abort(); + } +} + +#[start] +fn main(_argc: isize, _argv: *const *const u8) -> isize { + let world: Box<&str> = box "Hello World!\0"; + unsafe { + puts(*world as *const str as *const u8); + } + + 0 +} diff --git a/compiler/rustc_codegen_gcc/example/alloc_system.rs b/compiler/rustc_codegen_gcc/example/alloc_system.rs new file mode 100644 index 000000000..5f66ca67f --- /dev/null +++ b/compiler/rustc_codegen_gcc/example/alloc_system.rs @@ -0,0 +1,212 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +#![no_std] +#![feature(allocator_api, rustc_private)] +#![cfg_attr(any(unix, target_os = "redox"), feature(libc))] + +// The minimum alignment guaranteed by the architecture. This value is used to +// add fast paths for low alignment values. 
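alloc_example.rs above needs the unstable `#[start]` and `box_syntax` features only because it is `#![no_std]`; the `#[global_allocator]` hook it exercises is stable. For comparison, a minimal sketch of the same mechanism on stable Rust (not taken from this patch), wrapping `std`'s `System` allocator rather than the `alloc_system` copy below:

```rust
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};

// Forward every request to the system allocator, counting allocations on the way.
struct CountingAlloc;

static ALLOCATIONS: AtomicUsize = AtomicUsize::new(0);

unsafe impl GlobalAlloc for CountingAlloc {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        ALLOCATIONS.fetch_add(1, Ordering::Relaxed);
        System.alloc(layout)
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        System.dealloc(ptr, layout)
    }
}

// Every heap allocation in the program (Box, Vec, String, ...) now goes
// through CountingAlloc, just as ALLOC is registered in alloc_example.rs.
#[global_allocator]
static GLOBAL: CountingAlloc = CountingAlloc;

fn main() {
    let world = Box::new("Hello World!");
    println!("{}", world);
    println!("heap allocations so far: {}", ALLOCATIONS.load(Ordering::Relaxed));
}
```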
+#[cfg(all(any(target_arch = "x86", + target_arch = "arm", + target_arch = "mips", + target_arch = "powerpc", + target_arch = "powerpc64")))] +const MIN_ALIGN: usize = 8; +#[cfg(all(any(target_arch = "x86_64", + target_arch = "aarch64", + target_arch = "mips64", + target_arch = "s390x", + target_arch = "sparc64")))] +const MIN_ALIGN: usize = 16; + +pub struct System; +#[cfg(any(windows, unix, target_os = "redox"))] +mod realloc_fallback { + use core::alloc::{GlobalAlloc, Layout}; + use core::cmp; + use core::ptr; + impl super::System { + pub(crate) unsafe fn realloc_fallback(&self, ptr: *mut u8, old_layout: Layout, + new_size: usize) -> *mut u8 { + // Docs for GlobalAlloc::realloc require this to be valid: + let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align()); + let new_ptr = GlobalAlloc::alloc(self, new_layout); + if !new_ptr.is_null() { + let size = cmp::min(old_layout.size(), new_size); + ptr::copy_nonoverlapping(ptr, new_ptr, size); + GlobalAlloc::dealloc(self, ptr, old_layout); + } + new_ptr + } + } +} +#[cfg(any(unix, target_os = "redox"))] +mod platform { + extern crate libc; + use core::ptr; + use MIN_ALIGN; + use System; + use core::alloc::{GlobalAlloc, Layout}; + unsafe impl GlobalAlloc for System { + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { + libc::malloc(layout.size()) as *mut u8 + } else { + #[cfg(target_os = "macos")] + { + if layout.align() > (1 << 31) { + return ptr::null_mut() + } + } + aligned_malloc(&layout) + } + } + #[inline] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { + libc::calloc(layout.size(), 1) as *mut u8 + } else { + let ptr = self.alloc(layout.clone()); + if !ptr.is_null() { + ptr::write_bytes(ptr, 0, layout.size()); + } + ptr + } + } + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { + libc::free(ptr as *mut libc::c_void) + } + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + if layout.align() <= MIN_ALIGN && layout.align() <= new_size { + libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8 + } else { + self.realloc_fallback(ptr, layout, new_size) + } + } + } + #[cfg(any(target_os = "android", + target_os = "hermit", + target_os = "redox", + target_os = "solaris"))] + #[inline] + unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { + // On android we currently target API level 9 which unfortunately + // doesn't have the `posix_memalign` API used below. Instead we use + // `memalign`, but this unfortunately has the property on some systems + // where the memory returned cannot be deallocated by `free`! + // + // Upon closer inspection, however, this appears to work just fine with + // Android, so for this platform we should be fine to call `memalign` + // (which is present in API level 9). Some helpful references could + // possibly be chromium using memalign [1], attempts at documenting that + // memalign + free is ok [2] [3], or the current source of chromium + // which still uses memalign on android [4]. 
+ // + // [1]: https://codereview.chromium.org/10796020/ + // [2]: https://code.google.com/p/android/issues/detail?id=35391 + // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579 + // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/ + // /memory/aligned_memory.cc + libc::memalign(layout.align(), layout.size()) as *mut u8 + } + #[cfg(not(any(target_os = "android", + target_os = "hermit", + target_os = "redox", + target_os = "solaris")))] + #[inline] + unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { + let mut out = ptr::null_mut(); + let ret = libc::posix_memalign(&mut out, layout.align(), layout.size()); + if ret != 0 { + ptr::null_mut() + } else { + out as *mut u8 + } + } +} +#[cfg(windows)] +#[allow(nonstandard_style)] +mod platform { + use MIN_ALIGN; + use System; + use core::alloc::{GlobalAlloc, Layout}; + type LPVOID = *mut u8; + type HANDLE = LPVOID; + type SIZE_T = usize; + type DWORD = u32; + type BOOL = i32; + extern "system" { + fn GetProcessHeap() -> HANDLE; + fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID; + fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID; + fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL; + fn GetLastError() -> DWORD; + } + #[repr(C)] + struct Header(*mut u8); + const HEAP_ZERO_MEMORY: DWORD = 0x00000008; + unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header { + &mut *(ptr as *mut Header).offset(-1) + } + unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 { + let aligned = ptr.add(align - (ptr as usize & (align - 1))); + *get_header(aligned) = Header(ptr); + aligned + } + #[inline] + unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 { + let ptr = if layout.align() <= MIN_ALIGN { + HeapAlloc(GetProcessHeap(), flags, layout.size()) + } else { + let size = layout.size() + layout.align(); + let ptr = HeapAlloc(GetProcessHeap(), flags, size); + if ptr.is_null() { + ptr + } else { + align_ptr(ptr, layout.align()) + } + }; + ptr as *mut u8 + } + unsafe impl GlobalAlloc for System { + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + allocate_with_flags(layout, 0) + } + #[inline] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + allocate_with_flags(layout, HEAP_ZERO_MEMORY) + } + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + if layout.align() <= MIN_ALIGN { + let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID); + debug_assert!(err != 0, "Failed to free heap memory: {}", + GetLastError()); + } else { + let header = get_header(ptr); + let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID); + debug_assert!(err != 0, "Failed to free heap memory: {}", + GetLastError()); + } + } + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + if layout.align() <= MIN_ALIGN { + HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8 + } else { + self.realloc_fallback(ptr, layout, new_size) + } + } + } +} diff --git a/compiler/rustc_codegen_gcc/example/arbitrary_self_types_pointers_and_wrappers.rs b/compiler/rustc_codegen_gcc/example/arbitrary_self_types_pointers_and_wrappers.rs new file mode 100644 index 000000000..3af0ba09e --- /dev/null +++ b/compiler/rustc_codegen_gcc/example/arbitrary_self_types_pointers_and_wrappers.rs @@ -0,0 +1,69 @@ +// Adapted from rustc run-pass test suite + +#![feature(arbitrary_self_types, unsize, coerce_unsized, dispatch_from_dyn)] +#![feature(rustc_attrs)] + +use std::{ + 
ops::{Deref, CoerceUnsized, DispatchFromDyn}, + marker::Unsize, +}; + +struct Ptr<T: ?Sized>(Box<T>); + +impl<T: ?Sized> Deref for Ptr<T> { + type Target = T; + + fn deref(&self) -> &T { + &*self.0 + } +} + +impl<T: Unsize<U> + ?Sized, U: ?Sized> CoerceUnsized<Ptr<U>> for Ptr<T> {} +impl<T: Unsize<U> + ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T> {} + +struct Wrapper<T: ?Sized>(T); + +impl<T: ?Sized> Deref for Wrapper<T> { + type Target = T; + + fn deref(&self) -> &T { + &self.0 + } +} + +impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {} +impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {} + + +trait Trait { + // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable + // without unsized_locals), but wrappers around `Self` currently are not. + // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented + // fn wrapper(self: Wrapper<Self>) -> i32; + fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32; + fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32; + fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32; +} + +impl Trait for i32 { + fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32 { + **self + } + fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32 { + **self + } + fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32 { + ***self + } +} + +fn main() { + let pw = Ptr(Box::new(Wrapper(5))) as Ptr<Wrapper<dyn Trait>>; + assert_eq!(pw.ptr_wrapper(), 5); + + let wp = Wrapper(Ptr(Box::new(6))) as Wrapper<Ptr<dyn Trait>>; + assert_eq!(wp.wrapper_ptr(), 6); + + let wpw = Wrapper(Ptr(Box::new(Wrapper(7)))) as Wrapper<Ptr<Wrapper<dyn Trait>>>; + assert_eq!(wpw.wrapper_ptr_wrapper(), 7); +} diff --git a/compiler/rustc_codegen_gcc/example/dst-field-align.rs b/compiler/rustc_codegen_gcc/example/dst-field-align.rs new file mode 100644 index 000000000..6c338e999 --- /dev/null +++ b/compiler/rustc_codegen_gcc/example/dst-field-align.rs @@ -0,0 +1,67 @@ +// run-pass +#![allow(dead_code)] +struct Foo<T: ?Sized> { + a: u16, + b: T +} + +trait Bar { + fn get(&self) -> usize; +} + +impl Bar for usize { + fn get(&self) -> usize { *self } +} + +struct Baz<T: ?Sized> { + a: T +} + +struct HasDrop<T: ?Sized> { + ptr: Box<usize>, + data: T +} + +fn main() { + // Test that zero-offset works properly + let b : Baz<usize> = Baz { a: 7 }; + assert_eq!(b.a.get(), 7); + let b : &Baz<dyn Bar> = &b; + assert_eq!(b.a.get(), 7); + + // Test that the field is aligned properly + let f : Foo<usize> = Foo { a: 0, b: 11 }; + assert_eq!(f.b.get(), 11); + let ptr1 : *const u8 = &f.b as *const _ as *const u8; + + let f : &Foo<dyn Bar> = &f; + let ptr2 : *const u8 = &f.b as *const _ as *const u8; + assert_eq!(f.b.get(), 11); + + // The pointers should be the same + assert_eq!(ptr1, ptr2); + + // Test that nested DSTs work properly + let f : Foo<Foo<usize>> = Foo { a: 0, b: Foo { a: 1, b: 17 }}; + assert_eq!(f.b.b.get(), 17); + let f : &Foo<Foo<dyn Bar>> = &f; + assert_eq!(f.b.b.get(), 17); + + // Test that get the pointer via destructuring works + + let f : Foo<usize> = Foo { a: 0, b: 11 }; + let f : &Foo<dyn Bar> = &f; + let &Foo { a: _, b: ref bar } = f; + assert_eq!(bar.get(), 11); + + // Make sure that drop flags don't screw things up + + let d : HasDrop<Baz<[i32; 4]>> = HasDrop { + ptr: Box::new(0), + data: Baz { a: [1,2,3,4] } + }; + assert_eq!([1,2,3,4], d.data.a); + + let d : &HasDrop<Baz<[i32]>> = &d; + assert_eq!(&[1,2,3,4], &d.data.a); +} diff --git 
a/compiler/rustc_codegen_gcc/example/example.rs b/compiler/rustc_codegen_gcc/example/example.rs new file mode 100644 index 000000000..5878e8548 --- /dev/null +++ b/compiler/rustc_codegen_gcc/example/example.rs @@ -0,0 +1,208 @@ +#![feature(no_core, unboxed_closures)] +#![no_core] +#![allow(dead_code)] + +extern crate mini_core; + +use mini_core::*; + +fn abc(a: u8) -> u8 { + a * 2 +} + +fn bcd(b: bool, a: u8) -> u8 { + if b { + a * 2 + } else { + a * 3 + } +} + +fn call() { + abc(42); +} + +fn indirect_call() { + let f: fn() = call; + f(); +} + +enum BoolOption { + Some(bool), + None, +} + +fn option_unwrap_or(o: BoolOption, d: bool) -> bool { + match o { + BoolOption::Some(b) => b, + BoolOption::None => d, + } +} + +fn ret_42() -> u8 { + 42 +} + +fn return_str() -> &'static str { + "hello world" +} + +fn promoted_val() -> &'static u8 { + &(1 * 2) +} + +fn cast_ref_to_raw_ptr(abc: &u8) -> *const u8 { + abc as *const u8 +} + +fn cmp_raw_ptr(a: *const u8, b: *const u8) -> bool { + a == b +} + +fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) { + ( + a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8, + b as u32, + ) +} + +fn char_cast(c: char) -> u8 { + c as u8 +} + +pub struct DebugTuple(()); + +fn debug_tuple() -> DebugTuple { + DebugTuple(()) +} + +fn size_of<T>() -> usize { + intrinsics::size_of::<T>() +} + +fn use_size_of() -> usize { + size_of::<u64>() +} + +unsafe fn use_copy_intrinsic(src: *const u8, dst: *mut u8) { + intrinsics::copy::<u8>(src, dst, 1); +} + +unsafe fn use_copy_intrinsic_ref(src: *const u8, dst: *mut u8) { + let copy2 = &intrinsics::copy::<u8>; + copy2(src, dst, 1); +} + +const ABC: u8 = 6 * 7; + +fn use_const() -> u8 { + ABC +} + +pub fn call_closure_3arg() { + (|_, _, _| {})(0u8, 42u16, 0u8) +} + +pub fn call_closure_2arg() { + (|_, _| {})(0u8, 42u16) +} + +struct IsNotEmpty; + +impl<'a, 'b> FnOnce<(&'a &'b [u16],)> for IsNotEmpty { + type Output = (u8, u8); + + #[inline] + extern "rust-call" fn call_once(mut self, arg: (&'a &'b [u16],)) -> (u8, u8) { + self.call_mut(arg) + } +} + +impl<'a, 'b> FnMut<(&'a &'b [u16],)> for IsNotEmpty { + #[inline] + extern "rust-call" fn call_mut(&mut self, _arg: (&'a &'b [u16],)) -> (u8, u8) { + (0, 42) + } +} + +pub fn call_is_not_empty() { + IsNotEmpty.call_once((&(&[0u16] as &[_]),)); +} + +fn eq_char(a: char, b: char) -> bool { + a == b +} + +unsafe fn transmute(c: char) -> u32 { + intrinsics::transmute(c) +} + +unsafe fn deref_str_ptr(s: *const str) -> &'static str { + &*s +} + +fn use_array(arr: [u8; 3]) -> u8 { + arr[1] +} + +fn repeat_array() -> [u8; 3] { + [0; 3] +} + +fn array_as_slice(arr: &[u8; 3]) -> &[u8] { + arr +} + +unsafe fn use_ctlz_nonzero(a: u16) -> u16 { + intrinsics::ctlz_nonzero(a) +} + +fn ptr_as_usize(ptr: *const u8) -> usize { + ptr as usize +} + +fn float_cast(a: f32, b: f64) -> (f64, f32) { + (a as f64, b as f32) +} + +fn int_to_float(a: u8, b: i32) -> (f64, f32) { + (a as f64, b as f32) +} + +fn make_array() -> [u8; 3] { + [42, 0, 5] +} + +fn some_promoted_tuple() -> &'static (&'static str, &'static str) { + &("abc", "some") +} + +fn index_slice(s: &[u8]) -> u8 { + s[2] +} + +pub struct StrWrapper { + s: str, +} + +fn str_wrapper_get(w: &StrWrapper) -> &str { + &w.s +} + +fn i16_as_i8(a: i16) -> i8 { + a as i8 +} + +struct Unsized(u8, str); + +fn get_sized_field_ref_from_unsized_type(u: &Unsized) -> &u8 { + &u.0 +} + +fn get_unsized_field_ref_from_unsized_type(u: &Unsized) -> &str { + &u.1 +} + +pub fn 
reuse_byref_argument_storage(a: (u8, u16, u32)) -> u8 { + a.0 +} diff --git a/compiler/rustc_codegen_gcc/example/mini_core.rs b/compiler/rustc_codegen_gcc/example/mini_core.rs new file mode 100644 index 000000000..ddcbb0d9f --- /dev/null +++ b/compiler/rustc_codegen_gcc/example/mini_core.rs @@ -0,0 +1,599 @@ +#![feature( + no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types, + untagged_unions, decl_macro, rustc_attrs, transparent_unions, auto_traits, + thread_local +)] +#![no_core] +#![allow(dead_code)] + +#[no_mangle] +unsafe extern "C" fn _Unwind_Resume() { + intrinsics::unreachable(); +} + +#[lang = "sized"] +pub trait Sized {} + +#[lang = "destruct"] +pub trait Destruct {} + +#[lang = "unsize"] +pub trait Unsize<T: ?Sized> {} + +#[lang = "coerce_unsized"] +pub trait CoerceUnsized<T> {} + +impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {} +impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {} +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {} +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {} + +#[lang = "dispatch_from_dyn"] +pub trait DispatchFromDyn<T> {} + +// &T -> &U +impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {} +// &mut T -> &mut U +impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {} +// *const T -> *const U +impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {} +// *mut T -> *mut U +impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {} +impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {} + +#[lang = "receiver"] +pub trait Receiver {} + +impl<T: ?Sized> Receiver for &T {} +impl<T: ?Sized> Receiver for &mut T {} +impl<T: ?Sized> Receiver for Box<T> {} + +#[lang = "copy"] +pub unsafe trait Copy {} + +unsafe impl Copy for bool {} +unsafe impl Copy for u8 {} +unsafe impl Copy for u16 {} +unsafe impl Copy for u32 {} +unsafe impl Copy for u64 {} +unsafe impl Copy for usize {} +unsafe impl Copy for i8 {} +unsafe impl Copy for i16 {} +unsafe impl Copy for i32 {} +unsafe impl Copy for isize {} +unsafe impl Copy for f32 {} +unsafe impl Copy for f64 {} +unsafe impl Copy for char {} +unsafe impl<'a, T: ?Sized> Copy for &'a T {} +unsafe impl<T: ?Sized> Copy for *const T {} +unsafe impl<T: ?Sized> Copy for *mut T {} + +#[lang = "sync"] +pub unsafe trait Sync {} + +unsafe impl Sync for bool {} +unsafe impl Sync for u8 {} +unsafe impl Sync for u16 {} +unsafe impl Sync for u32 {} +unsafe impl Sync for u64 {} +unsafe impl Sync for usize {} +unsafe impl Sync for i8 {} +unsafe impl Sync for i16 {} +unsafe impl Sync for i32 {} +unsafe impl Sync for isize {} +unsafe impl Sync for char {} +unsafe impl<'a, T: ?Sized> Sync for &'a T {} +unsafe impl Sync for [u8; 16] {} + +#[lang = "freeze"] +unsafe auto trait Freeze {} + +unsafe impl<T: ?Sized> Freeze for PhantomData<T> {} +unsafe impl<T: ?Sized> Freeze for *const T {} +unsafe impl<T: ?Sized> Freeze for *mut T {} +unsafe impl<T: ?Sized> Freeze for &T {} +unsafe impl<T: ?Sized> Freeze for &mut T {} + +#[lang = "structural_peq"] +pub trait StructuralPartialEq {} + +#[lang = "structural_teq"] +pub trait StructuralEq {} + +#[lang = "not"] +pub trait Not { + type Output; + + fn not(self) -> Self::Output; +} + +impl Not for bool { + type Output = bool; + + fn not(self) -> bool { + !self + } +} + +#[lang = "mul"] +pub trait Mul<RHS = Self> { + type Output; + + #[must_use] + fn 
mul(self, rhs: RHS) -> Self::Output; +} + +impl Mul for u8 { + type Output = Self; + + fn mul(self, rhs: Self) -> Self::Output { + self * rhs + } +} + +impl Mul for usize { + type Output = Self; + + fn mul(self, rhs: Self) -> Self::Output { + self * rhs + } +} + +#[lang = "add"] +pub trait Add<RHS = Self> { + type Output; + + fn add(self, rhs: RHS) -> Self::Output; +} + +impl Add for u8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for usize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +#[lang = "sub"] +pub trait Sub<RHS = Self> { + type Output; + + fn sub(self, rhs: RHS) -> Self::Output; +} + +impl Sub for usize { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for u8 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for i8 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for i16 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +#[lang = "rem"] +pub trait Rem<RHS = Self> { + type Output; + + fn rem(self, rhs: RHS) -> Self::Output; +} + +impl Rem for usize { + type Output = Self; + + fn rem(self, rhs: Self) -> Self { + self % rhs + } +} + +#[lang = "bitor"] +pub trait BitOr<RHS = Self> { + type Output; + + #[must_use] + fn bitor(self, rhs: RHS) -> Self::Output; +} + +impl BitOr for bool { + type Output = bool; + + fn bitor(self, rhs: bool) -> bool { + self | rhs + } +} + +impl<'a> BitOr<bool> for &'a bool { + type Output = bool; + + fn bitor(self, rhs: bool) -> bool { + *self | rhs + } +} + +#[lang = "eq"] +pub trait PartialEq<Rhs: ?Sized = Self> { + fn eq(&self, other: &Rhs) -> bool; + fn ne(&self, other: &Rhs) -> bool; +} + +impl PartialEq for u8 { + fn eq(&self, other: &u8) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &u8) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for u16 { + fn eq(&self, other: &u16) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &u16) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for u32 { + fn eq(&self, other: &u32) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &u32) -> bool { + (*self) != (*other) + } +} + + +impl PartialEq for u64 { + fn eq(&self, other: &u64) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &u64) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for usize { + fn eq(&self, other: &usize) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &usize) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for i8 { + fn eq(&self, other: &i8) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &i8) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for i32 { + fn eq(&self, other: &i32) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &i32) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for isize { + fn eq(&self, other: &isize) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &isize) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for char { + fn eq(&self, other: &char) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &char) -> bool { + (*self) != (*other) + } +} + +impl<T: ?Sized> PartialEq for *const T { + fn eq(&self, other: &*const T) -> bool { + *self == *other + } + fn ne(&self, other: &*const T) -> bool { + *self != *other + } +} + +#[lang = "neg"] +pub trait 
Neg { + type Output; + + fn neg(self) -> Self::Output; +} + +impl Neg for i8 { + type Output = i8; + + fn neg(self) -> i8 { + -self + } +} + +impl Neg for i16 { + type Output = i16; + + fn neg(self) -> i16 { + self + } +} + +impl Neg for isize { + type Output = isize; + + fn neg(self) -> isize { + -self + } +} + +impl Neg for f32 { + type Output = f32; + + fn neg(self) -> f32 { + -self + } +} + +pub enum Option<T> { + Some(T), + None, +} + +pub use Option::*; + +#[lang = "phantom_data"] +pub struct PhantomData<T: ?Sized>; + +#[lang = "fn_once"] +#[rustc_paren_sugar] +pub trait FnOnce<Args> { + #[lang = "fn_once_output"] + type Output; + + extern "rust-call" fn call_once(self, args: Args) -> Self::Output; +} + +#[lang = "fn_mut"] +#[rustc_paren_sugar] +pub trait FnMut<Args>: FnOnce<Args> { + extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output; +} + +#[lang = "panic"] +#[track_caller] +pub fn panic(_msg: &str) -> ! { + unsafe { + libc::puts("Panicking\n\0" as *const str as *const u8); + intrinsics::abort(); + } +} + +#[lang = "panic_bounds_check"] +#[track_caller] +fn panic_bounds_check(index: usize, len: usize) -> ! { + unsafe { + libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index); + intrinsics::abort(); + } +} + +#[lang = "eh_personality"] +fn eh_personality() -> ! { + loop {} +} + +#[lang = "drop_in_place"] +#[allow(unconditional_recursion)] +pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) { + // Code here does not matter - this is replaced by the + // real drop glue by the compiler. + drop_in_place(to_drop); +} + +#[lang = "deref"] +pub trait Deref { + type Target: ?Sized; + + fn deref(&self) -> &Self::Target; +} + +pub trait Allocator { +} + +pub struct Global; + +impl Allocator for Global {} + +#[lang = "owned_box"] +pub struct Box< + T: ?Sized, + A: Allocator = Global, +>(*mut T, A); + +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {} + +impl<T: ?Sized, A: Allocator> Drop for Box<T, A> { + fn drop(&mut self) { + // drop is currently performed by compiler. + } +} + +impl<T> Deref for Box<T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + &**self + } +} + +#[lang = "exchange_malloc"] +unsafe fn allocate(size: usize, _align: usize) -> *mut u8 { + libc::malloc(size) +} + +#[lang = "box_free"] +unsafe fn box_free<T: ?Sized, A: Allocator>(ptr: *mut T, alloc: A) { + libc::free(ptr as *mut u8); +} + +#[lang = "drop"] +pub trait Drop { + fn drop(&mut self); +} + +#[lang = "manually_drop"] +#[repr(transparent)] +pub struct ManuallyDrop<T: ?Sized> { + pub value: T, +} + +#[lang = "maybe_uninit"] +#[repr(transparent)] +pub union MaybeUninit<T> { + pub uninit: (), + pub value: ManuallyDrop<T>, +} + +pub mod intrinsics { + extern "rust-intrinsic" { + pub fn abort() -> !; + pub fn size_of<T>() -> usize; + pub fn size_of_val<T: ?::Sized>(val: *const T) -> usize; + pub fn min_align_of<T>() -> usize; + pub fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize; + pub fn copy<T>(src: *const T, dst: *mut T, count: usize); + pub fn transmute<T, U>(e: T) -> U; + pub fn ctlz_nonzero<T>(x: T) -> T; + pub fn needs_drop<T: ?::Sized>() -> bool; + pub fn bitreverse<T>(x: T) -> T; + pub fn bswap<T>(x: T) -> T; + pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize); + pub fn unreachable() -> !; + } +} + +pub mod libc { + #[link(name = "c")] + extern "C" { + pub fn puts(s: *const u8) -> i32; + pub fn printf(format: *const i8, ...) 
-> i32; + pub fn malloc(size: usize) -> *mut u8; + pub fn free(ptr: *mut u8); + pub fn memcpy(dst: *mut u8, src: *const u8, size: usize); + pub fn memmove(dst: *mut u8, src: *const u8, size: usize); + pub fn strncpy(dst: *mut u8, src: *const u8, size: usize); + } +} + +#[lang = "index"] +pub trait Index<Idx: ?Sized> { + type Output: ?Sized; + fn index(&self, index: Idx) -> &Self::Output; +} + +impl<T> Index<usize> for [T; 3] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +impl<T> Index<usize> for [T] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +extern { + type VaListImpl; +} + +#[lang = "va_list"] +#[repr(transparent)] +pub struct VaList<'a>(&'a mut VaListImpl); + +#[rustc_builtin_macro] +#[rustc_macro_transparency = "semitransparent"] +pub macro stringify($($t:tt)*) { /* compiler built-in */ } + +#[rustc_builtin_macro] +#[rustc_macro_transparency = "semitransparent"] +pub macro file() { /* compiler built-in */ } + +#[rustc_builtin_macro] +#[rustc_macro_transparency = "semitransparent"] +pub macro line() { /* compiler built-in */ } + +#[rustc_builtin_macro] +#[rustc_macro_transparency = "semitransparent"] +pub macro cfg() { /* compiler built-in */ } + +pub static A_STATIC: u8 = 42; + +#[lang = "panic_location"] +struct PanicLocation { + file: &'static str, + line: u32, + column: u32, +} + +#[no_mangle] +pub fn get_tls() -> u8 { + #[thread_local] + static A: u8 = 42; + + A +} diff --git a/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs b/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs new file mode 100644 index 000000000..14fd9eeff --- /dev/null +++ b/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs @@ -0,0 +1,431 @@ +// Adapted from https://github.com/sunfishcode/mir2cranelift/blob/master/rust-examples/nocore-hello-world.rs + +#![feature( + no_core, unboxed_closures, start, lang_items, box_syntax, never_type, linkage, + extern_types, thread_local +)] +#![no_core] +#![allow(dead_code, non_camel_case_types)] + +extern crate mini_core; + +use mini_core::*; +use mini_core::libc::*; + +unsafe extern "C" fn my_puts(s: *const u8) { + puts(s); +} + +#[lang = "termination"] +trait Termination { + fn report(self) -> i32; +} + +impl Termination for () { + fn report(self) -> i32 { + unsafe { + NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44 + *NUM_REF as i32 + } + } +} + +trait SomeTrait { + fn object_safe(&self); +} + +impl SomeTrait for &'static str { + fn object_safe(&self) { + unsafe { + puts(*self as *const str as *const u8); + } + } +} + +struct NoisyDrop { + text: &'static str, + inner: NoisyDropInner, +} + +struct NoisyDropUnsized { + inner: NoisyDropInner, + text: str, +} + +struct NoisyDropInner; + +impl Drop for NoisyDrop { + fn drop(&mut self) { + unsafe { + puts(self.text as *const str as *const u8); + } + } +} + +impl Drop for NoisyDropInner { + fn drop(&mut self) { + unsafe { + puts("Inner got dropped!\0" as *const str as *const u8); + } + } +} + +impl SomeTrait for NoisyDrop { + fn object_safe(&self) {} +} + +enum Ordering { + Less = -1, + Equal = 0, + Greater = 1, +} + +#[lang = "start"] +fn start<T: Termination + 'static>( + main: fn() -> T, + argc: isize, + argv: *const *const u8, +) -> isize { + if argc == 3 { + unsafe { puts(*argv); } + unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const u8)); } + unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const u8)); } + } + + 
main().report(); + 0 +} + +static mut NUM: u8 = 6 * 7; +static NUM_REF: &'static u8 = unsafe { &NUM }; + +macro_rules! assert { + ($e:expr) => { + if !$e { + panic(stringify!(! $e)); + } + }; +} + +macro_rules! assert_eq { + ($l:expr, $r: expr) => { + if $l != $r { + panic(stringify!($l != $r)); + } + } +} + +struct Unique<T: ?Sized> { + pointer: *const T, + _marker: PhantomData<T>, +} + +impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {} + +unsafe fn zeroed<T>() -> T { + let mut uninit = MaybeUninit { uninit: () }; + intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1); + uninit.value.value +} + +fn take_f32(_f: f32) {} +fn take_unique(_u: Unique<()>) {} + +fn return_u128_pair() -> (u128, u128) { + (0, 0) +} + +fn call_return_u128_pair() { + return_u128_pair(); +} + +fn main() { + take_unique(Unique { + pointer: 0 as *const (), + _marker: PhantomData, + }); + take_f32(0.1); + + //call_return_u128_pair(); + + let slice = &[0, 1] as &[i32]; + let slice_ptr = slice as *const [i32] as *const i32; + + assert_eq!(slice_ptr as usize % 4, 0); + + //return; + + unsafe { + printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8); + + let hello: &[u8] = b"Hello\0" as &[u8; 6]; + let ptr: *const u8 = hello as *const [u8] as *const u8; + puts(ptr); + + let world: Box<&str> = box "World!\0"; + puts(*world as *const str as *const u8); + world as Box<dyn SomeTrait>; + + assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8); + + assert_eq!(intrinsics::bswap(0xabu8), 0xabu8); + assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16); + assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32); + assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64); + + assert_eq!(intrinsics::size_of_val(hello) as u8, 6); + + let chars = &['C', 'h', 'a', 'r', 's']; + let chars = chars as &[char]; + assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5); + + let a: &dyn SomeTrait = &"abc\0"; + a.object_safe(); + + assert_eq!(intrinsics::size_of_val(a) as u8, 16); + assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4); + + assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2); + assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8); + + assert!(!intrinsics::needs_drop::<u8>()); + assert!(!intrinsics::needs_drop::<[u8]>()); + assert!(intrinsics::needs_drop::<NoisyDrop>()); + assert!(intrinsics::needs_drop::<NoisyDropUnsized>()); + + Unique { + pointer: 0 as *const &str, + _marker: PhantomData, + } as Unique<dyn SomeTrait>; + + struct MyDst<T: ?Sized>(T); + + intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>); + + struct Foo { + x: u8, + y: !, + } + + unsafe fn uninitialized<T>() -> T { + MaybeUninit { uninit: () }.value.value + } + + zeroed::<(u8, u8)>(); + #[allow(unreachable_code)] + { + if false { + zeroed::<!>(); + zeroed::<Foo>(); + uninitialized::<Foo>(); + } + } + } + + let _ = box NoisyDrop { + text: "Boxed outer got dropped!\0", + inner: NoisyDropInner, + } as Box<dyn SomeTrait>; + + const FUNC_REF: Option<fn()> = Some(main); + match FUNC_REF { + Some(_) => {}, + None => assert!(false), + } + + match Ordering::Less { + Ordering::Less => {}, + _ => assert!(false), + } + + [NoisyDropInner, NoisyDropInner]; + + let x = &[0u32, 42u32] as &[u32]; + match x { + [] => assert_eq!(0u32, 1), + [_, ref y @ ..] 
=> assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize), + } + + assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42); + + extern { + #[linkage = "weak"] + static ABC: *const u8; + } + + { + extern { + #[linkage = "weak"] + static ABC: *const u8; + } + } + + // TODO(antoyo): to make this work, support weak linkage. + //unsafe { assert_eq!(ABC as usize, 0); } + + &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>; + + let f = 1000.0; + assert_eq!(f as u8, 255); + let f2 = -1000.0; + assert_eq!(f2 as i8, -128); + assert_eq!(f2 as u8, 0); + + static ANOTHER_STATIC: &u8 = &A_STATIC; + assert_eq!(*ANOTHER_STATIC, 42); + + check_niche_behavior(); + + extern "C" { + type ExternType; + } + + struct ExternTypeWrapper { + _a: ExternType, + } + + let nullptr = 0 as *const (); + let extern_nullptr = nullptr as *const ExternTypeWrapper; + extern_nullptr as *const (); + let slice_ptr = &[] as *const [u8]; + slice_ptr as *const u8; + + #[cfg(not(jit))] + test_tls(); +} + +#[repr(C)] +enum c_void { + _1, + _2, +} + +type c_int = i32; +type c_ulong = u64; + +type pthread_t = c_ulong; + +#[repr(C)] +struct pthread_attr_t { + __size: [u64; 7], +} + +#[link(name = "pthread")] +extern "C" { + fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int; + + fn pthread_create( + native: *mut pthread_t, + attr: *const pthread_attr_t, + f: extern "C" fn(_: *mut c_void) -> *mut c_void, + value: *mut c_void + ) -> c_int; + + fn pthread_join( + native: pthread_t, + value: *mut *mut c_void + ) -> c_int; +} + +#[thread_local] +#[cfg(not(jit))] +static mut TLS: u8 = 42; + +#[cfg(not(jit))] +extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void { + unsafe { TLS = 0; } + 0 as *mut c_void +} + +#[cfg(not(jit))] +fn test_tls() { + unsafe { + let mut attr: pthread_attr_t = zeroed(); + let mut thread: pthread_t = 0; + + assert_eq!(TLS, 42); + + if pthread_attr_init(&mut attr) != 0 { + assert!(false); + } + + if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 { + assert!(false); + } + + let mut res = 0 as *mut c_void; + pthread_join(thread, &mut res); + + // TLS of main thread must not have been changed by the other thread. + assert_eq!(TLS, 42); + + puts("TLS works!\n\0" as *const str as *const u8); + } +} + +// Copied ui/issues/issue-61696.rs + +pub enum Infallible {} + +// The check that the `bool` field of `V1` is encoding a "niche variant" +// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect, +// causing valid `V1` values to be interpreted as other variants. +pub enum E1 { + V1 { f: bool }, + V2 { f: Infallible }, + V3, + V4, +} + +// Computing the discriminant used to be done using the niche type (here `u8`, +// from the `bool` field of `V1`), overflowing for variants with large enough +// indices (`V3` and `V4`), causing them to be interpreted as other variants. 
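The 256-variant `E2` that follows stresses the same encoding past what a `u8` niche can hold. The layout fact both halves of this issue-61696 regression test rely on can be observed with `E1` alone; the following stand-alone snippet is an illustration, not code from this patch:

```rust
// `V2`'s field is uninhabited, so rustc can store the dataless variants in the
// unused bit patterns of `V1`'s bool instead of adding a separate tag.
pub enum Infallible {}

#[allow(dead_code)]
pub enum E1 {
    V1 { f: bool },
    V2 { f: Infallible },
    V3,
    V4,
}

fn main() {
    // Prints 1 on current rustc: bool only uses the values 0 and 1, leaving
    // 2..=255 free as a niche for the discriminant of V3 and V4.
    println!("size_of::<E1>() = {}", std::mem::size_of::<E1>());

    // The bug being guarded against: a valid `V1` must never be read back as
    // one of the niche variants.
    assert!(matches!(E1::V1 { f: true }, E1::V1 { .. }));
    assert!(matches!(E1::V1 { f: false }, E1::V1 { .. }));
    assert!(matches!(E1::V3, E1::V3));
}
```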
+pub enum E2<X> { + V1 { f: bool }, + + /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X), + _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X), + _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X), + _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X), + _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X), + _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X), + _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X), + _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X), + _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X), + _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X), + _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X), + _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X), + _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X), + _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X), + _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X), + _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X), + _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X), + _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X), + _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X), + _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X), + _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X), + _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X), + _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X), + _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X), + _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X), + _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X), + _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X), + _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X), + _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X), + _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X), + _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X), + _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X), + + V3, + V4, +} + +fn check_niche_behavior () { + if let E1::V2 { .. } = (E1::V1 { f: true }) { + intrinsics::abort(); + } + + if let E2::V1 { .. } = E2::V3::<Infallible> { + intrinsics::abort(); + } +} diff --git a/compiler/rustc_codegen_gcc/example/mod_bench.rs b/compiler/rustc_codegen_gcc/example/mod_bench.rs new file mode 100644 index 000000000..2e2b0052d --- /dev/null +++ b/compiler/rustc_codegen_gcc/example/mod_bench.rs @@ -0,0 +1,37 @@ +#![feature(start, box_syntax, core_intrinsics, lang_items)] +#![no_std] + +#[link(name = "c")] +extern {} + +#[panic_handler] +fn panic_handler(_: &core::panic::PanicInfo) -> ! 
{ + unsafe { + core::intrinsics::abort(); + } +} + +#[lang="eh_personality"] +fn eh_personality(){} + +// Required for rustc_codegen_llvm +#[no_mangle] +unsafe extern "C" fn _Unwind_Resume() { + core::intrinsics::unreachable(); +} + +#[start] +fn main(_argc: isize, _argv: *const *const u8) -> isize { + for i in 2..100_000_000 { + black_box((i + 1) % i); + } + + 0 +} + +#[inline(never)] +fn black_box(i: u32) { + if i != 1 { + unsafe { core::intrinsics::abort(); } + } +} diff --git a/compiler/rustc_codegen_gcc/example/std_example.rs b/compiler/rustc_codegen_gcc/example/std_example.rs new file mode 100644 index 000000000..31069058a --- /dev/null +++ b/compiler/rustc_codegen_gcc/example/std_example.rs @@ -0,0 +1,286 @@ +#![feature(core_intrinsics, generators, generator_trait, is_sorted)] + +use std::arch::x86_64::*; +use std::io::Write; +use std::ops::Generator; + +extern { + pub fn printf(format: *const i8, ...) -> i32; +} + +fn main() { + let mutex = std::sync::Mutex::new(()); + let _guard = mutex.lock().unwrap(); + + let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>(); + let stderr = ::std::io::stderr(); + let mut stderr = stderr.lock(); + + std::thread::spawn(move || { + println!("Hello from another thread!"); + }); + + writeln!(stderr, "some {} text", "<unknown>").unwrap(); + + let _ = std::process::Command::new("true").env("c", "d").spawn(); + + println!("cargo:rustc-link-lib=z"); + + static ONCE: std::sync::Once = std::sync::Once::new(); + ONCE.call_once(|| {}); + + let _eq = LoopState::Continue(()) == LoopState::Break(()); + + // Make sure ByValPair values with differently sized components are correctly passed + map(None::<(u8, Box<Instruction>)>); + + println!("{}", 2.3f32.exp()); + println!("{}", 2.3f32.exp2()); + println!("{}", 2.3f32.abs()); + println!("{}", 2.3f32.sqrt()); + println!("{}", 2.3f32.floor()); + println!("{}", 2.3f32.ceil()); + println!("{}", 2.3f32.min(1.0)); + println!("{}", 2.3f32.max(1.0)); + println!("{}", 2.3f32.powi(2)); + println!("{}", 2.3f32.log2()); + assert_eq!(2.3f32.copysign(-1.0), -2.3f32); + println!("{}", 2.3f32.powf(2.0)); + + assert_eq!(-128i8, (-128i8).saturating_sub(1)); + assert_eq!(127i8, 127i8.saturating_sub(-128)); + assert_eq!(-128i8, (-128i8).saturating_add(-128)); + assert_eq!(127i8, 127i8.saturating_add(1)); + + assert_eq!(-32768i16, (-32768i16).saturating_add(-32768)); + assert_eq!(32767i16, 32767i16.saturating_add(1)); + + assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26); + assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7); + + let _d = 0i128.checked_div(2i128); + let _d = 0u128.checked_div(2u128); + assert_eq!(1u128 + 2, 3); + + assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128); + assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128); + assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128); + + let tmp = 353985398u128; + assert_eq!(tmp * 932490u128, 330087843781020u128); + + let tmp = -0x1234_5678_9ABC_DEF0i64; + assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128); + + // Check that all u/i128 <-> float casts work correctly. 
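The casts below only check the value 100 in each direction. The out-of-range cases are also worth keeping in mind when reading the backend code: since Rust 1.45, float-to-int `as` casts saturate rather than being undefined. A stand-alone illustration (not part of this diff):

```rust
fn main() {
    // Negative and oversized values clamp to the integer type's range...
    assert_eq!((-1.0f64) as u128, 0);
    assert_eq!(f64::INFINITY as u128, u128::MAX);
    assert_eq!(1e40f64 as i128, i128::MAX);
    // ...and NaN maps to zero.
    assert_eq!(f64::NAN as i128, 0);

    // In the other direction the cast rounds to the nearest representable
    // float; 2^128 - 1 rounds up past f32::MAX, giving infinity.
    assert_eq!(u128::MAX as f32, f32::INFINITY);
}
```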
+ let houndred_u128 = 100u128; + let houndred_i128 = 100i128; + let houndred_f32 = 100.0f32; + let houndred_f64 = 100.0f64; + assert_eq!(houndred_u128 as f32, 100.0); + assert_eq!(houndred_u128 as f64, 100.0); + assert_eq!(houndred_f32 as u128, 100); + assert_eq!(houndred_f64 as u128, 100); + assert_eq!(houndred_i128 as f32, 100.0); + assert_eq!(houndred_i128 as f64, 100.0); + assert_eq!(houndred_f32 as i128, 100); + assert_eq!(houndred_f64 as i128, 100); + + let _a = 1u32 << 2u8; + + let empty: [i32; 0] = []; + assert!(empty.is_sorted()); + + println!("{:?}", std::intrinsics::caller_location()); + + #[cfg(feature="master")] + unsafe { + test_simd(); + } + + Box::pin(move |mut _task_context| { + yield (); + }).as_mut().resume(0); + + println!("End"); +} + +#[cfg(feature="master")] +#[target_feature(enable = "sse2")] +unsafe fn test_simd() { + let x = _mm_setzero_si128(); + let y = _mm_set1_epi16(7); + let or = _mm_or_si128(x, y); + let cmp_eq = _mm_cmpeq_epi8(y, y); + let cmp_lt = _mm_cmplt_epi8(y, y); + + assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]); + assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]); + assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]); + + test_mm_slli_si128(); + test_mm_movemask_epi8(); + test_mm256_movemask_epi8(); + test_mm_add_epi8(); + test_mm_add_pd(); + test_mm_cvtepi8_epi16(); + test_mm_cvtsi128_si64(); + + test_mm_extract_epi8(); + test_mm_insert_epi16(); + + let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))); + assert_eq!(mask1, 1); +} + +#[cfg(feature="master")] +#[target_feature(enable = "sse2")] +unsafe fn test_mm_slli_si128() { + #[rustfmt::skip] + let a = _mm_setr_epi8( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ); + let r = _mm_slli_si128(a, 1); + let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + assert_eq_m128i(r, e); + + #[rustfmt::skip] + let a = _mm_setr_epi8( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ); + let r = _mm_slli_si128(a, 15); + let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1); + assert_eq_m128i(r, e); + + #[rustfmt::skip] + let a = _mm_setr_epi8( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ); + let r = _mm_slli_si128(a, 16); + assert_eq_m128i(r, _mm_set1_epi8(0)); +} + + +#[cfg(feature="master")] +#[target_feature(enable = "sse2")] +unsafe fn test_mm_movemask_epi8() { + #[rustfmt::skip] + let a = _mm_setr_epi8( + 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01, + 0b0101, 0b1111_0000u8 as i8, 0, 0, + 0, 0, 0b1111_0000u8 as i8, 0b0101, + 0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, + ); + let r = _mm_movemask_epi8(a); + assert_eq!(r, 0b10100100_00100101); +} + +#[cfg(feature="master")] +#[target_feature(enable = "avx2")] +unsafe fn test_mm256_movemask_epi8() { + let a = _mm256_set1_epi8(-1); + let r = _mm256_movemask_epi8(a); + let e = -1; + assert_eq!(r, e); +} + +#[cfg(feature="master")] +#[target_feature(enable = "sse2")] +unsafe fn test_mm_add_epi8() { + let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + #[rustfmt::skip] + let b = _mm_setr_epi8( + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + ); + let r = _mm_add_epi8(a, b); + #[rustfmt::skip] + let e = _mm_setr_epi8( + 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, + ); + assert_eq_m128i(r, e); +} + +#[cfg(feature="master")] 
+#[target_feature(enable = "sse2")] +unsafe fn test_mm_add_pd() { + let a = _mm_setr_pd(1.0, 2.0); + let b = _mm_setr_pd(5.0, 10.0); + let r = _mm_add_pd(a, b); + assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0)); +} + +#[cfg(feature="master")] +fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) { + unsafe { + assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y)); + } +} + +#[cfg(feature="master")] +#[target_feature(enable = "sse2")] +pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) { + if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 { + panic!("{:?} != {:?}", a, b); + } +} + +#[cfg(feature="master")] +#[target_feature(enable = "sse2")] +unsafe fn test_mm_cvtsi128_si64() { + let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0])); + assert_eq!(r, 5); +} + +#[cfg(feature="master")] +#[target_feature(enable = "sse4.1")] +unsafe fn test_mm_cvtepi8_epi16() { + let a = _mm_set1_epi8(10); + let r = _mm_cvtepi8_epi16(a); + let e = _mm_set1_epi16(10); + assert_eq_m128i(r, e); + let a = _mm_set1_epi8(-10); + let r = _mm_cvtepi8_epi16(a); + let e = _mm_set1_epi16(-10); + assert_eq_m128i(r, e); +} + +#[cfg(feature="master")] +#[target_feature(enable = "sse4.1")] +unsafe fn test_mm_extract_epi8() { + #[rustfmt::skip] + let a = _mm_setr_epi8( + -1, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15 + ); + let r1 = _mm_extract_epi8(a, 0); + let r2 = _mm_extract_epi8(a, 3); + assert_eq!(r1, 0xFF); + assert_eq!(r2, 3); +} + +#[cfg(all(feature="master", target_arch = "x86_64"))] +#[target_feature(enable = "sse2")] +unsafe fn test_mm_insert_epi16() { + let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7); + let r = _mm_insert_epi16(a, 9, 0); + let e = _mm_setr_epi16(9, 1, 2, 3, 4, 5, 6, 7); + assert_eq_m128i(r, e); +} + +#[derive(PartialEq)] +enum LoopState { + Continue(()), + Break(()) +} + +pub enum Instruction { + Increment, + Loop, +} + +fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> { + match a { + None => None, + Some((_, instr)) => Some(instr), + } +} diff --git a/compiler/rustc_codegen_gcc/example/subslice-patterns-const-eval.rs b/compiler/rustc_codegen_gcc/example/subslice-patterns-const-eval.rs new file mode 100644 index 000000000..2cb84786f --- /dev/null +++ b/compiler/rustc_codegen_gcc/example/subslice-patterns-const-eval.rs @@ -0,0 +1,97 @@ +// Based on https://github.com/rust-lang/rust/blob/c5840f9d252c2f5cc16698dbf385a29c5de3ca07/src/test/ui/array-slice-vec/subslice-patterns-const-eval-match.rs + +// Test that array subslice patterns are correctly handled in const evaluation. + +// run-pass + +#[derive(PartialEq, Debug, Clone)] +struct N(u8); + +#[derive(PartialEq, Debug, Clone)] +struct Z; + +macro_rules! n { + ($($e:expr),* $(,)?) => { + [$(N($e)),*] + } +} + +// This macro has an unused variable so that it can be repeated base on the +// number of times a repeated variable (`$e` in `z`) occurs. +macro_rules! zed { + ($e:expr) => { Z } +} + +macro_rules! z { + ($($e:expr),* $(,)?) => { + [$(zed!($e)),*] + } +} + +// Compare constant evaluation and runtime evaluation of a given expression. +macro_rules! compare_evaluation { + ($e:expr, $t:ty $(,)?) => {{ + const CONST_EVAL: $t = $e; + const fn const_eval() -> $t { $e } + static CONST_EVAL2: $t = const_eval(); + let runtime_eval = $e; + assert_eq!(CONST_EVAL, runtime_eval); + assert_eq!(CONST_EVAL2, runtime_eval); + }} +} + +// Repeat `$test`, substituting the given macro variables with the given +// identifiers. +// +// For example: +// +// repeat! 
{ +// ($name); X; Y: +// struct $name; +// } +// +// Expands to: +// +// struct X; struct Y; +// +// This is used to repeat the tests using both the `N` and `Z` +// types. +macro_rules! repeat { + (($($dollar:tt $placeholder:ident)*); $($($values:ident),+);*: $($test:tt)*) => { + macro_rules! single { + ($($dollar $placeholder:ident),*) => { $($test)* } + } + $(single!($($values),+);)* + } +} + +fn main() { + repeat! { + ($arr $Ty); n, N; z, Z: + compare_evaluation!({ let [_, x @ .., _] = $arr!(1, 2, 3, 4); x }, [$Ty; 2]); + compare_evaluation!({ let [_, ref x @ .., _] = $arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]); + compare_evaluation!({ let [_, x @ .., _] = &$arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]); + + compare_evaluation!({ let [_, _, x @ .., _, _] = $arr!(1, 2, 3, 4); x }, [$Ty; 0]); + compare_evaluation!( + { let [_, _, ref x @ .., _, _] = $arr!(1, 2, 3, 4); x }, + &'static [$Ty; 0], + ); + compare_evaluation!( + { let [_, _, x @ .., _, _] = &$arr!(1, 2, 3, 4); x }, + &'static [$Ty; 0], + ); + + compare_evaluation!({ let [_, .., x] = $arr!(1, 2, 3, 4); x }, $Ty); + compare_evaluation!({ let [_, .., ref x] = $arr!(1, 2, 3, 4); x }, &'static $Ty); + compare_evaluation!({ let [_, _y @ .., x] = &$arr!(1, 2, 3, 4); x }, &'static $Ty); + } + + compare_evaluation!({ let [_, .., N(x)] = n!(1, 2, 3, 4); x }, u8); + compare_evaluation!({ let [_, .., N(ref x)] = n!(1, 2, 3, 4); x }, &'static u8); + compare_evaluation!({ let [_, .., N(x)] = &n!(1, 2, 3, 4); x }, &'static u8); + + compare_evaluation!({ let [N(x), .., _] = n!(1, 2, 3, 4); x }, u8); + compare_evaluation!({ let [N(ref x), .., _] = n!(1, 2, 3, 4); x }, &'static u8); + compare_evaluation!({ let [N(x), .., _] = &n!(1, 2, 3, 4); x }, &'static u8); +} diff --git a/compiler/rustc_codegen_gcc/example/track-caller-attribute.rs b/compiler/rustc_codegen_gcc/example/track-caller-attribute.rs new file mode 100644 index 000000000..93bab17e4 --- /dev/null +++ b/compiler/rustc_codegen_gcc/example/track-caller-attribute.rs @@ -0,0 +1,40 @@ +// Based on https://github.com/anp/rust/blob/175631311716d7dfeceec40d2587cde7142ffa8c/src/test/ui/rfc-2091-track-caller/track-caller-attribute.rs + +// run-pass + +use std::panic::Location; + +#[track_caller] +fn tracked() -> &'static Location<'static> { + Location::caller() +} + +fn nested_intrinsic() -> &'static Location<'static> { + Location::caller() +} + +fn nested_tracked() -> &'static Location<'static> { + tracked() +} + +fn main() { + let location = Location::caller(); + assert_eq!(location.file(), file!()); + assert_eq!(location.line(), 21); + assert_eq!(location.column(), 20); + + let tracked = tracked(); + assert_eq!(tracked.file(), file!()); + assert_eq!(tracked.line(), 26); + assert_eq!(tracked.column(), 19); + + let nested = nested_intrinsic(); + assert_eq!(nested.file(), file!()); + assert_eq!(nested.line(), 13); + assert_eq!(nested.column(), 5); + + let contained = nested_tracked(); + assert_eq!(contained.file(), file!()); + assert_eq!(contained.line(), 17); + assert_eq!(contained.column(), 5); +} |
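track-caller-attribute.rs pins exact line and column numbers, which is why the `Location::caller()` results are hard-coded in the assertions above. In ordinary code the attribute is used to make checking helpers report their caller's position; a short sketch (not from this patch):

```rust
use std::panic::Location;

// Because of #[track_caller], Location::caller() inside this function
// resolves to the call site, not to this body.
#[track_caller]
fn check(condition: bool) {
    if !condition {
        let loc = Location::caller();
        panic!("check failed at {}:{}:{}", loc.file(), loc.line(), loc.column());
    }
}

fn main() {
    check(1 + 1 == 2);
    // Changing this to `check(false)` reports this line, not check()'s body.
    check(true);
}
```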