author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit     698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree       173a775858bd501c378080a10dca74132f05bc50 /compiler/rustc_codegen_cranelift/example
parent     Initial commit. (diff)

Adding upstream version 1.64.0+dfsg1. (upstream/1.64.0+dfsg1)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_cranelift/example')
 -rw-r--r--  compiler/rustc_codegen_cranelift/example/alloc_example.rs                               |  38
 -rw-r--r--  compiler/rustc_codegen_cranelift/example/alloc_system.rs                                | 130
 -rw-r--r--  compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs  |  68
 -rw-r--r--  compiler/rustc_codegen_cranelift/example/dst-field-align.rs                             |  67
 -rw-r--r--  compiler/rustc_codegen_cranelift/example/example.rs                                     | 208
 -rw-r--r--  compiler/rustc_codegen_cranelift/example/float-minmax-pass.rs                           |  53
 -rw-r--r--  compiler/rustc_codegen_cranelift/example/issue-91827-extern-types.rs                    |  59
 -rw-r--r--  compiler/rustc_codegen_cranelift/example/mini_core.rs                                   | 657
 -rw-r--r--  compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs                       | 515
 -rw-r--r--  compiler/rustc_codegen_cranelift/example/mod_bench.rs                                   |  36
 -rw-r--r--  compiler/rustc_codegen_cranelift/example/std_example.rs                                 | 356
 -rw-r--r--  compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs                |  97
 -rw-r--r--  compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs                      |  40
 13 files changed, 2324 insertions(+), 0 deletions(-)
diff --git a/compiler/rustc_codegen_cranelift/example/alloc_example.rs b/compiler/rustc_codegen_cranelift/example/alloc_example.rs
new file mode 100644
index 000000000..bc1594d82
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/alloc_example.rs
@@ -0,0 +1,38 @@
+#![feature(start, core_intrinsics, alloc_error_handler, box_syntax)]
+#![no_std]
+
+extern crate alloc;
+extern crate alloc_system;
+
+use alloc::boxed::Box;
+
+use alloc_system::System;
+
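+// Route all heap allocations in this no_std example through the minimal `System`
+// allocator defined in alloc_system.rs.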
+#[global_allocator]
+static ALLOC: System = System;
+
+#[cfg_attr(unix, link(name = "c"))]
+#[cfg_attr(target_env = "msvc", link(name = "msvcrt"))]
+extern "C" {
+ fn puts(s: *const u8) -> i32;
+}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+ core::intrinsics::abort();
+}
+
+#[alloc_error_handler]
+fn alloc_error_handler(_: alloc::alloc::Layout) -> ! {
+ core::intrinsics::abort();
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ let world: Box<&str> = box "Hello World!\0";
+ unsafe {
+ puts(*world as *const str as *const u8);
+ }
+
+ 0
+}
diff --git a/compiler/rustc_codegen_cranelift/example/alloc_system.rs b/compiler/rustc_codegen_cranelift/example/alloc_system.rs
new file mode 100644
index 000000000..cf95c89bc
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/alloc_system.rs
@@ -0,0 +1,130 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![no_std]
+
+pub struct System;
+
+#[cfg(any(windows, unix, target_os = "redox"))]
+mod realloc_fallback {
+ use core::alloc::{GlobalAlloc, Layout};
+ use core::cmp;
+ use core::ptr;
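+    // Generic `realloc` fallback: allocate a new block, copy min(old, new) bytes over,
+    // then free the old allocation.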
+ impl super::System {
+ pub(crate) unsafe fn realloc_fallback(
+ &self,
+ ptr: *mut u8,
+ old_layout: Layout,
+ new_size: usize,
+ ) -> *mut u8 {
+ // Docs for GlobalAlloc::realloc require this to be valid:
+ let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
+ let new_ptr = GlobalAlloc::alloc(self, new_layout);
+ if !new_ptr.is_null() {
+ let size = cmp::min(old_layout.size(), new_size);
+ ptr::copy_nonoverlapping(ptr, new_ptr, size);
+ GlobalAlloc::dealloc(self, ptr, old_layout);
+ }
+ new_ptr
+ }
+ }
+}
+#[cfg(any(unix, target_os = "redox"))]
+mod platform {
+ use core::alloc::{GlobalAlloc, Layout};
+ use core::ffi::c_void;
+ use core::ptr;
+ use System;
+ extern "C" {
+ fn posix_memalign(memptr: *mut *mut c_void, align: usize, size: usize) -> i32;
+ fn free(p: *mut c_void);
+ }
+ unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ aligned_malloc(&layout)
+ }
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ let ptr = self.alloc(layout.clone());
+ if !ptr.is_null() {
+ ptr::write_bytes(ptr, 0, layout.size());
+ }
+ ptr
+ }
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+ free(ptr as *mut c_void)
+ }
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ self.realloc_fallback(ptr, layout, new_size)
+ }
+ }
+ unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+ let mut out = ptr::null_mut();
+ let ret = posix_memalign(&mut out, layout.align(), layout.size());
+ if ret != 0 { ptr::null_mut() } else { out as *mut u8 }
+ }
+}
+#[cfg(windows)]
+#[allow(nonstandard_style)]
+mod platform {
+ use core::alloc::{GlobalAlloc, Layout};
+ use System;
+ type LPVOID = *mut u8;
+ type HANDLE = LPVOID;
+ type SIZE_T = usize;
+ type DWORD = u32;
+ type BOOL = i32;
+ extern "system" {
+ fn GetProcessHeap() -> HANDLE;
+ fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
+ fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
+ fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
+ fn GetLastError() -> DWORD;
+ }
+ #[repr(C)]
+ struct Header(*mut u8);
+ const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
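+    // HeapAlloc has no alignment parameter, so `allocate_with_flags` over-allocates by
+    // `align` bytes and `align_ptr` stashes the original pointer in a `Header` placed
+    // just before the aligned pointer; `dealloc` recovers it again via `get_header`.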
+ unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
+ &mut *(ptr as *mut Header).offset(-1)
+ }
+ unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
+ let aligned = ptr.add(align - (ptr as usize & (align - 1)));
+ *get_header(aligned) = Header(ptr);
+ aligned
+ }
+ #[inline]
+ unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 {
+ let size = layout.size() + layout.align();
+ let ptr = HeapAlloc(GetProcessHeap(), flags, size);
+ (if ptr.is_null() { ptr } else { align_ptr(ptr, layout.align()) }) as *mut u8
+ }
+ unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ allocate_with_flags(layout, 0)
+ }
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ allocate_with_flags(layout, HEAP_ZERO_MEMORY)
+ }
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ let header = get_header(ptr);
+ let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
+ debug_assert!(err != 0, "Failed to free heap memory: {}", GetLastError());
+ }
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ self.realloc_fallback(ptr, layout, new_size)
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs b/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs
new file mode 100644
index 000000000..d270fec6b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs
@@ -0,0 +1,68 @@
+// Adapted from rustc run-pass test suite
+
+#![feature(arbitrary_self_types, unsize, coerce_unsized, dispatch_from_dyn)]
+
+use std::{
+ ops::{Deref, CoerceUnsized, DispatchFromDyn},
+ marker::Unsize,
+};
+
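+// A Box-like owning pointer. The CoerceUnsized and DispatchFromDyn impls below allow
+// unsizing to `Ptr<dyn Trait>` and using it as a method receiver for dynamic dispatch.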
+struct Ptr<T: ?Sized>(Box<T>);
+
+impl<T: ?Sized> Deref for Ptr<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &*self.0
+ }
+}
+
+impl<T: Unsize<U> + ?Sized, U: ?Sized> CoerceUnsized<Ptr<U>> for Ptr<T> {}
+impl<T: Unsize<U> + ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T> {}
+
+struct Wrapper<T: ?Sized>(T);
+
+impl<T: ?Sized> Deref for Wrapper<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
+
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {}
+impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
+
+
+trait Trait {
+ // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
+ // without unsized_locals), but wrappers around `Self` currently are not.
+ // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
+ // fn wrapper(self: Wrapper<Self>) -> i32;
+ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
+ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
+ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;
+}
+
+impl Trait for i32 {
+ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32 {
+ **self
+ }
+ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32 {
+ **self
+ }
+ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32 {
+ ***self
+ }
+}
+
+fn main() {
+ let pw = Ptr(Box::new(Wrapper(5))) as Ptr<Wrapper<dyn Trait>>;
+ assert_eq!(pw.ptr_wrapper(), 5);
+
+ let wp = Wrapper(Ptr(Box::new(6))) as Wrapper<Ptr<dyn Trait>>;
+ assert_eq!(wp.wrapper_ptr(), 6);
+
+ let wpw = Wrapper(Ptr(Box::new(Wrapper(7)))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
+ assert_eq!(wpw.wrapper_ptr_wrapper(), 7);
+}
diff --git a/compiler/rustc_codegen_cranelift/example/dst-field-align.rs b/compiler/rustc_codegen_cranelift/example/dst-field-align.rs
new file mode 100644
index 000000000..6c338e999
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/dst-field-align.rs
@@ -0,0 +1,67 @@
+// run-pass
+#![allow(dead_code)]
+struct Foo<T: ?Sized> {
+ a: u16,
+ b: T
+}
+
+trait Bar {
+ fn get(&self) -> usize;
+}
+
+impl Bar for usize {
+ fn get(&self) -> usize { *self }
+}
+
+struct Baz<T: ?Sized> {
+ a: T
+}
+
+struct HasDrop<T: ?Sized> {
+ ptr: Box<usize>,
+ data: T
+}
+
+fn main() {
+ // Test that zero-offset works properly
+ let b : Baz<usize> = Baz { a: 7 };
+ assert_eq!(b.a.get(), 7);
+ let b : &Baz<dyn Bar> = &b;
+ assert_eq!(b.a.get(), 7);
+
+ // Test that the field is aligned properly
+ let f : Foo<usize> = Foo { a: 0, b: 11 };
+ assert_eq!(f.b.get(), 11);
+ let ptr1 : *const u8 = &f.b as *const _ as *const u8;
+
+ let f : &Foo<dyn Bar> = &f;
+ let ptr2 : *const u8 = &f.b as *const _ as *const u8;
+ assert_eq!(f.b.get(), 11);
+
+ // The pointers should be the same
+ assert_eq!(ptr1, ptr2);
+
+ // Test that nested DSTs work properly
+ let f : Foo<Foo<usize>> = Foo { a: 0, b: Foo { a: 1, b: 17 }};
+ assert_eq!(f.b.b.get(), 17);
+ let f : &Foo<Foo<dyn Bar>> = &f;
+ assert_eq!(f.b.b.get(), 17);
+
+    // Test that getting the pointer via destructuring works
+
+ let f : Foo<usize> = Foo { a: 0, b: 11 };
+ let f : &Foo<dyn Bar> = &f;
+ let &Foo { a: _, b: ref bar } = f;
+ assert_eq!(bar.get(), 11);
+
+ // Make sure that drop flags don't screw things up
+
+ let d : HasDrop<Baz<[i32; 4]>> = HasDrop {
+ ptr: Box::new(0),
+ data: Baz { a: [1,2,3,4] }
+ };
+ assert_eq!([1,2,3,4], d.data.a);
+
+ let d : &HasDrop<Baz<[i32]>> = &d;
+ assert_eq!(&[1,2,3,4], &d.data.a);
+}
diff --git a/compiler/rustc_codegen_cranelift/example/example.rs b/compiler/rustc_codegen_cranelift/example/example.rs
new file mode 100644
index 000000000..d5c122bf6
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/example.rs
@@ -0,0 +1,208 @@
+#![feature(no_core, unboxed_closures)]
+#![no_core]
+#![allow(dead_code)]
+
+extern crate mini_core;
+
+use mini_core::*;
+
+pub fn abc(a: u8) -> u8 {
+ a * 2
+}
+
+pub fn bcd(b: bool, a: u8) -> u8 {
+ if b {
+ a * 2
+ } else {
+ a * 3
+ }
+}
+
+pub fn call() {
+ abc(42);
+}
+
+pub fn indirect_call() {
+ let f: fn() = call;
+ f();
+}
+
+pub enum BoolOption {
+ Some(bool),
+ None,
+}
+
+pub fn option_unwrap_or(o: BoolOption, d: bool) -> bool {
+ match o {
+ BoolOption::Some(b) => b,
+ BoolOption::None => d,
+ }
+}
+
+pub fn ret_42() -> u8 {
+ 42
+}
+
+pub fn return_str() -> &'static str {
+ "hello world"
+}
+
+pub fn promoted_val() -> &'static u8 {
+ &(1 * 2)
+}
+
+pub fn cast_ref_to_raw_ptr(abc: &u8) -> *const u8 {
+ abc as *const u8
+}
+
+pub fn cmp_raw_ptr(a: *const u8, b: *const u8) -> bool {
+ a == b
+}
+
+pub fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) {
+ (
+ a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8,
+ b as u32,
+ )
+}
+
+pub fn char_cast(c: char) -> u8 {
+ c as u8
+}
+
+pub struct DebugTuple(());
+
+pub fn debug_tuple() -> DebugTuple {
+ DebugTuple(())
+}
+
+pub fn size_of<T>() -> usize {
+ intrinsics::size_of::<T>()
+}
+
+pub fn use_size_of() -> usize {
+ size_of::<u64>()
+}
+
+pub unsafe fn use_copy_intrinsic(src: *const u8, dst: *mut u8) {
+ intrinsics::copy::<u8>(src, dst, 1);
+}
+
+pub unsafe fn use_copy_intrinsic_ref(src: *const u8, dst: *mut u8) {
+ let copy2 = &intrinsics::copy::<u8>;
+ copy2(src, dst, 1);
+}
+
+pub const ABC: u8 = 6 * 7;
+
+pub fn use_const() -> u8 {
+ ABC
+}
+
+pub fn call_closure_3arg() {
+ (|_, _, _| {})(0u8, 42u16, 0u8)
+}
+
+pub fn call_closure_2arg() {
+ (|_, _| {})(0u8, 42u16)
+}
+
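+// Hand-written `FnOnce`/`FnMut` implementations using the unstable "rust-call" ABI,
+// exercised by `call_is_not_empty` below.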
+pub struct IsNotEmpty;
+
+impl<'a, 'b> FnOnce<(&'a &'b [u16],)> for IsNotEmpty {
+ type Output = (u8, u8);
+
+ #[inline]
+ extern "rust-call" fn call_once(mut self, arg: (&'a &'b [u16],)) -> (u8, u8) {
+ self.call_mut(arg)
+ }
+}
+
+impl<'a, 'b> FnMut<(&'a &'b [u16],)> for IsNotEmpty {
+ #[inline]
+ extern "rust-call" fn call_mut(&mut self, _arg: (&'a &'b [u16],)) -> (u8, u8) {
+ (0, 42)
+ }
+}
+
+pub fn call_is_not_empty() {
+ IsNotEmpty.call_once((&(&[0u16] as &[_]),));
+}
+
+pub fn eq_char(a: char, b: char) -> bool {
+ a == b
+}
+
+pub unsafe fn transmute(c: char) -> u32 {
+ intrinsics::transmute(c)
+}
+
+pub unsafe fn deref_str_ptr(s: *const str) -> &'static str {
+ &*s
+}
+
+pub fn use_array(arr: [u8; 3]) -> u8 {
+ arr[1]
+}
+
+pub fn repeat_array() -> [u8; 3] {
+ [0; 3]
+}
+
+pub fn array_as_slice(arr: &[u8; 3]) -> &[u8] {
+ arr
+}
+
+pub unsafe fn use_ctlz_nonzero(a: u16) -> u16 {
+ intrinsics::ctlz_nonzero(a)
+}
+
+pub fn ptr_as_usize(ptr: *const u8) -> usize {
+ ptr as usize
+}
+
+pub fn float_cast(a: f32, b: f64) -> (f64, f32) {
+ (a as f64, b as f32)
+}
+
+pub fn int_to_float(a: u8, b: i32) -> (f64, f32) {
+ (a as f64, b as f32)
+}
+
+pub fn make_array() -> [u8; 3] {
+ [42, 0, 5]
+}
+
+pub fn some_promoted_tuple() -> &'static (&'static str, &'static str) {
+ &("abc", "some")
+}
+
+pub fn index_slice(s: &[u8]) -> u8 {
+ s[2]
+}
+
+pub struct StrWrapper {
+ s: str,
+}
+
+pub fn str_wrapper_get(w: &StrWrapper) -> &str {
+ &w.s
+}
+
+pub fn i16_as_i8(a: i16) -> i8 {
+ a as i8
+}
+
+pub struct Unsized(u8, str);
+
+pub fn get_sized_field_ref_from_unsized_type(u: &Unsized) -> &u8 {
+ &u.0
+}
+
+pub fn get_unsized_field_ref_from_unsized_type(u: &Unsized) -> &str {
+ &u.1
+}
+
+pub fn reuse_byref_argument_storage(a: (u8, u16, u32)) -> u8 {
+ a.0
+}
diff --git a/compiler/rustc_codegen_cranelift/example/float-minmax-pass.rs b/compiler/rustc_codegen_cranelift/example/float-minmax-pass.rs
new file mode 100644
index 000000000..b8f901d1b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/float-minmax-pass.rs
@@ -0,0 +1,53 @@
+// Copied from https://github.com/rust-lang/rust/blob/3fe3b89cd57229343eeca753fdd8c63d9b03c65c/src/test/ui/simd/intrinsic/float-minmax-pass.rs
+// run-pass
+// ignore-emscripten
+
+// Test that the simd_f{min,max} intrinsics produce the correct results.
+
+#![feature(repr_simd, platform_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[repr(simd)]
+#[derive(Copy, Clone, PartialEq, Debug)]
+struct f32x4(pub f32, pub f32, pub f32, pub f32);
+
+extern "platform-intrinsic" {
+ fn simd_fmin<T>(x: T, y: T) -> T;
+ fn simd_fmax<T>(x: T, y: T) -> T;
+}
+
+fn main() {
+ let x = f32x4(1.0, 2.0, 3.0, 4.0);
+ let y = f32x4(2.0, 1.0, 4.0, 3.0);
+
+ #[cfg(not(any(target_arch = "mips", target_arch = "mips64")))]
+ let nan = f32::NAN;
+ // MIPS hardware treats f32::NAN as SNAN. Clear the signaling bit.
+ // See https://github.com/rust-lang/rust/issues/52746.
+ #[cfg(any(target_arch = "mips", target_arch = "mips64"))]
+ let nan = f32::from_bits(f32::NAN.to_bits() - 1);
+
+ let n = f32x4(nan, nan, nan, nan);
+
+ unsafe {
+ let min0 = simd_fmin(x, y);
+ let min1 = simd_fmin(y, x);
+ assert_eq!(min0, min1);
+ let e = f32x4(1.0, 1.0, 3.0, 3.0);
+ assert_eq!(min0, e);
+ let minn = simd_fmin(x, n);
+ assert_eq!(minn, x);
+ let minn = simd_fmin(y, n);
+ assert_eq!(minn, y);
+
+ let max0 = simd_fmax(x, y);
+ let max1 = simd_fmax(y, x);
+ assert_eq!(max0, max1);
+ let e = f32x4(2.0, 2.0, 4.0, 4.0);
+ assert_eq!(max0, e);
+ let maxn = simd_fmax(x, n);
+ assert_eq!(maxn, x);
+ let maxn = simd_fmax(y, n);
+ assert_eq!(maxn, y);
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/example/issue-91827-extern-types.rs b/compiler/rustc_codegen_cranelift/example/issue-91827-extern-types.rs
new file mode 100644
index 000000000..2ecc8b823
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/issue-91827-extern-types.rs
@@ -0,0 +1,59 @@
+// Copied from rustc ui test suite
+
+// run-pass
+//
+// Test that we can handle unsized types with an extern type tail part.
+// Regression test for issue #91827.
+
+#![feature(const_ptr_offset_from)]
+#![feature(extern_types)]
+
+use std::ptr::addr_of;
+
+extern "C" {
+ type Opaque;
+}
+
+unsafe impl Sync for Opaque {}
+
+#[repr(C)]
+pub struct List<T> {
+ len: usize,
+ data: [T; 0],
+ tail: Opaque,
+}
+
+#[repr(C)]
+pub struct ListImpl<T, const N: usize> {
+ len: usize,
+ data: [T; N],
+}
+
+impl<T> List<T> {
+ const fn as_slice(&self) -> &[T] {
+ unsafe { std::slice::from_raw_parts(self.data.as_ptr(), self.len) }
+ }
+}
+
+impl<T, const N: usize> ListImpl<T, N> {
+ const fn as_list(&self) -> &List<T> {
+ unsafe { std::mem::transmute(self) }
+ }
+}
+
+pub static A: ListImpl<u128, 3> = ListImpl {
+ len: 3,
+ data: [5, 6, 7],
+};
+pub static A_REF: &'static List<u128> = A.as_list();
+pub static A_TAIL_OFFSET: isize = tail_offset(A.as_list());
+
+const fn tail_offset<T>(list: &List<T>) -> isize {
+ unsafe { (addr_of!(list.tail) as *const u8).offset_from(list as *const List<T> as *const u8) }
+}
+
+fn main() {
+ assert_eq!(A_REF.as_slice(), &[5, 6, 7]);
+ // Check that interpreter and code generation agree about the position of the tail field.
+ assert_eq!(A_TAIL_OFFSET, tail_offset(A_REF));
+}
diff --git a/compiler/rustc_codegen_cranelift/example/mini_core.rs b/compiler/rustc_codegen_cranelift/example/mini_core.rs
new file mode 100644
index 000000000..8b6042a3d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/mini_core.rs
@@ -0,0 +1,657 @@
+#![feature(
+ no_core,
+ lang_items,
+ intrinsics,
+ unboxed_closures,
+ extern_types,
+ decl_macro,
+ rustc_attrs,
+ transparent_unions,
+ auto_traits,
+ thread_local
+)]
+#![no_core]
+#![allow(dead_code)]
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "destruct"]
+pub trait Destruct {}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
+#[lang = "dispatch_from_dyn"]
+pub trait DispatchFromDyn<T> {}
+
+// &T -> &U
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
+// &mut T -> &mut U
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
+// *const T -> *const U
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
+// *mut T -> *mut U
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
+
+#[lang = "receiver"]
+pub trait Receiver {}
+
+impl<T: ?Sized> Receiver for &T {}
+impl<T: ?Sized> Receiver for &mut T {}
+impl<T: ?Sized> Receiver for Box<T> {}
+
+#[lang = "copy"]
+pub unsafe trait Copy {}
+
+unsafe impl Copy for bool {}
+unsafe impl Copy for u8 {}
+unsafe impl Copy for u16 {}
+unsafe impl Copy for u32 {}
+unsafe impl Copy for u64 {}
+unsafe impl Copy for u128 {}
+unsafe impl Copy for usize {}
+unsafe impl Copy for i8 {}
+unsafe impl Copy for i16 {}
+unsafe impl Copy for i32 {}
+unsafe impl Copy for isize {}
+unsafe impl Copy for f32 {}
+unsafe impl Copy for f64 {}
+unsafe impl Copy for char {}
+unsafe impl<'a, T: ?Sized> Copy for &'a T {}
+unsafe impl<T: ?Sized> Copy for *const T {}
+unsafe impl<T: ?Sized> Copy for *mut T {}
+unsafe impl<T: Copy> Copy for Option<T> {}
+
+#[lang = "sync"]
+pub unsafe trait Sync {}
+
+unsafe impl Sync for bool {}
+unsafe impl Sync for u8 {}
+unsafe impl Sync for u16 {}
+unsafe impl Sync for u32 {}
+unsafe impl Sync for u64 {}
+unsafe impl Sync for usize {}
+unsafe impl Sync for i8 {}
+unsafe impl Sync for i16 {}
+unsafe impl Sync for i32 {}
+unsafe impl Sync for isize {}
+unsafe impl Sync for char {}
+unsafe impl<'a, T: ?Sized> Sync for &'a T {}
+unsafe impl Sync for [u8; 16] {}
+
+#[lang = "freeze"]
+unsafe auto trait Freeze {}
+
+unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
+unsafe impl<T: ?Sized> Freeze for *const T {}
+unsafe impl<T: ?Sized> Freeze for *mut T {}
+unsafe impl<T: ?Sized> Freeze for &T {}
+unsafe impl<T: ?Sized> Freeze for &mut T {}
+
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {}
+
+#[lang = "structural_teq"]
+pub trait StructuralEq {}
+
+#[lang = "not"]
+pub trait Not {
+ type Output;
+
+ fn not(self) -> Self::Output;
+}
+
+impl Not for bool {
+ type Output = bool;
+
+ fn not(self) -> bool {
+ !self
+ }
+}
+
+#[lang = "mul"]
+pub trait Mul<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn mul(self, rhs: RHS) -> Self::Output;
+}
+
+impl Mul for u8 {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+impl Mul for usize {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+#[lang = "rem"]
+pub trait Rem<RHS = Self> {
+ type Output;
+
+ fn rem(self, rhs: RHS) -> Self::Output;
+}
+
+impl Rem for usize {
+ type Output = Self;
+
+ fn rem(self, rhs: Self) -> Self {
+ self % rhs
+ }
+}
+
+#[lang = "bitor"]
+pub trait BitOr<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn bitor(self, rhs: RHS) -> Self::Output;
+}
+
+impl BitOr for bool {
+ type Output = bool;
+
+ fn bitor(self, rhs: bool) -> bool {
+ self | rhs
+ }
+}
+
+impl<'a> BitOr<bool> for &'a bool {
+ type Output = bool;
+
+ fn bitor(self, rhs: bool) -> bool {
+ *self | rhs
+ }
+}
+
+#[lang = "eq"]
+pub trait PartialEq<Rhs: ?Sized = Self> {
+ fn eq(&self, other: &Rhs) -> bool;
+ fn ne(&self, other: &Rhs) -> bool;
+}
+
+impl PartialEq for u8 {
+ fn eq(&self, other: &u8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u16 {
+ fn eq(&self, other: &u16) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u16) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u32 {
+ fn eq(&self, other: &u32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+
+impl PartialEq for u64 {
+ fn eq(&self, other: &u64) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u64) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u128 {
+ fn eq(&self, other: &u128) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u128) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for usize {
+ fn eq(&self, other: &usize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &usize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i8 {
+ fn eq(&self, other: &i8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i32 {
+ fn eq(&self, other: &i32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for isize {
+ fn eq(&self, other: &isize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &isize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for char {
+ fn eq(&self, other: &char) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &char) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl<T: ?Sized> PartialEq for *const T {
+ fn eq(&self, other: &*const T) -> bool {
+ *self == *other
+ }
+ fn ne(&self, other: &*const T) -> bool {
+ *self != *other
+ }
+}
+
+impl<T: PartialEq> PartialEq for Option<T> {
+ fn eq(&self, other: &Self) -> bool {
+ match (self, other) {
+ (Some(lhs), Some(rhs)) => *lhs == *rhs,
+ (None, None) => true,
+ _ => false,
+ }
+ }
+
+ fn ne(&self, other: &Self) -> bool {
+ match (self, other) {
+ (Some(lhs), Some(rhs)) => *lhs != *rhs,
+ (None, None) => false,
+ _ => true,
+ }
+ }
+}
+
+#[lang = "shl"]
+pub trait Shl<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn shl(self, rhs: RHS) -> Self::Output;
+}
+
+impl Shl for u128 {
+ type Output = u128;
+
+ fn shl(self, rhs: u128) -> u128 {
+ self << rhs
+ }
+}
+
+#[lang = "neg"]
+pub trait Neg {
+ type Output;
+
+ fn neg(self) -> Self::Output;
+}
+
+impl Neg for i8 {
+ type Output = i8;
+
+ fn neg(self) -> i8 {
+ -self
+ }
+}
+
+impl Neg for i16 {
+ type Output = i16;
+
+ fn neg(self) -> i16 {
+ self
+ }
+}
+
+impl Neg for isize {
+ type Output = isize;
+
+ fn neg(self) -> isize {
+ -self
+ }
+}
+
+impl Neg for f32 {
+ type Output = f32;
+
+ fn neg(self) -> f32 {
+ -self
+ }
+}
+
+pub enum Option<T> {
+ Some(T),
+ None,
+}
+
+pub use Option::*;
+
+#[lang = "phantom_data"]
+pub struct PhantomData<T: ?Sized>;
+
+#[lang = "fn_once"]
+#[rustc_paren_sugar]
+pub trait FnOnce<Args> {
+ #[lang = "fn_once_output"]
+ type Output;
+
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
+
+#[lang = "fn_mut"]
+#[rustc_paren_sugar]
+pub trait FnMut<Args>: FnOnce<Args> {
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
+#[lang = "panic"]
+#[track_caller]
+pub fn panic(_msg: &'static str) -> ! {
+ unsafe {
+ libc::puts("Panicking\n\0" as *const str as *const i8);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "eh_personality"]
+fn eh_personality() -> ! {
+ loop {}
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+#[lang = "deref"]
+pub trait Deref {
+ type Target: ?Sized;
+
+ fn deref(&self) -> &Self::Target;
+}
+
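+// Minimal `NonNull`: the scalar-valid-range attribute marks the pointer as never null,
+// giving enums like `Option<Box<T>>` the null-pointer niche optimization.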
+#[repr(transparent)]
+#[rustc_layout_scalar_valid_range_start(1)]
+#[rustc_nonnull_optimization_guaranteed]
+pub struct NonNull<T: ?Sized>(pub *const T);
+
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> {}
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NonNull<U>> for NonNull<T> where T: Unsize<U> {}
+
+pub struct Unique<T: ?Sized> {
+ pub pointer: NonNull<T>,
+ pub _marker: PhantomData<T>,
+}
+
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
+#[lang = "owned_box"]
+pub struct Box<T: ?Sized>(Unique<T>, ());
+
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
+
+impl<T: ?Sized> Drop for Box<T> {
+ fn drop(&mut self) {
+ // drop is currently performed by compiler.
+ }
+}
+
+impl<T: ?Sized> Deref for Box<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &**self
+ }
+}
+
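+// `box` expressions allocate through the `exchange_malloc` lang item and are freed
+// through `box_free`; both are backed directly by libc `malloc`/`free` here.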
+#[lang = "exchange_malloc"]
+unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
+ libc::malloc(size)
+}
+
+#[lang = "box_free"]
+unsafe fn box_free<T: ?Sized>(ptr: Unique<T>, alloc: ()) {
+ libc::free(ptr.pointer.0 as *mut u8);
+}
+
+#[lang = "drop"]
+pub trait Drop {
+ fn drop(&mut self);
+}
+
+#[lang = "manually_drop"]
+#[repr(transparent)]
+pub struct ManuallyDrop<T: ?Sized> {
+ pub value: T,
+}
+
+#[lang = "maybe_uninit"]
+#[repr(transparent)]
+pub union MaybeUninit<T> {
+ pub uninit: (),
+ pub value: ManuallyDrop<T>,
+}
+
+pub mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ pub fn size_of<T>() -> usize;
+ pub fn size_of_val<T: ?::Sized>(val: *const T) -> usize;
+ pub fn min_align_of<T>() -> usize;
+ pub fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize;
+ pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
+ pub fn transmute<T, U>(e: T) -> U;
+ pub fn ctlz_nonzero<T>(x: T) -> T;
+ pub fn needs_drop<T: ?::Sized>() -> bool;
+ pub fn bitreverse<T>(x: T) -> T;
+ pub fn bswap<T>(x: T) -> T;
+ pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
+ }
+}
+
+pub mod libc {
+ #[cfg_attr(unix, link(name = "c"))]
+ #[cfg_attr(target_env = "msvc", link(name = "msvcrt"))]
+ extern "C" {
+ pub fn puts(s: *const i8) -> i32;
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn malloc(size: usize) -> *mut u8;
+ pub fn free(ptr: *mut u8);
+ pub fn memcpy(dst: *mut u8, src: *const u8, size: usize);
+ pub fn memmove(dst: *mut u8, src: *const u8, size: usize);
+ pub fn strncpy(dst: *mut u8, src: *const u8, size: usize);
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+extern {
+ type VaListImpl;
+}
+
+#[lang = "va_list"]
+#[repr(transparent)]
+pub struct VaList<'a>(&'a mut VaListImpl);
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro stringify($($t:tt)*) { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro file() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro line() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro cfg() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro global_asm() { /* compiler built-in */ }
+
+pub static A_STATIC: u8 = 42;
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[no_mangle]
+#[cfg(not(windows))]
+pub fn get_tls() -> u8 {
+ #[thread_local]
+ static A: u8 = 42;
+
+ A
+}
diff --git a/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs b/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
new file mode 100644
index 000000000..aa1f239ba
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
@@ -0,0 +1,515 @@
+#![feature(no_core, lang_items, never_type, linkage, extern_types, thread_local, box_syntax)]
+#![no_core]
+#![allow(dead_code, non_camel_case_types)]
+
+extern crate mini_core;
+
+use mini_core::*;
+use mini_core::libc::*;
+
+macro_rules! assert {
+ ($e:expr) => {
+ if !$e {
+ panic(stringify!(! $e));
+ }
+ };
+}
+
+macro_rules! assert_eq {
+ ($l:expr, $r: expr) => {
+ if $l != $r {
+ panic(stringify!($l != $r));
+ }
+ }
+}
+
+#[lang = "termination"]
+trait Termination {
+ fn report(self) -> i32;
+}
+
+impl Termination for () {
+ fn report(self) -> i32 {
+ unsafe {
+ NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44
+ assert_eq!(*NUM_REF as i32, 44);
+ }
+ 0
+ }
+}
+
+trait SomeTrait {
+ fn object_safe(&self);
+}
+
+impl SomeTrait for &'static str {
+ fn object_safe(&self) {
+ unsafe {
+ puts(*self as *const str as *const i8);
+ }
+ }
+}
+
+struct NoisyDrop {
+ text: &'static str,
+ inner: NoisyDropInner,
+}
+
+struct NoisyDropUnsized {
+ inner: NoisyDropInner,
+ text: str,
+}
+
+struct NoisyDropInner;
+
+impl Drop for NoisyDrop {
+ fn drop(&mut self) {
+ unsafe {
+ puts(self.text as *const str as *const i8);
+ }
+ }
+}
+
+impl Drop for NoisyDropInner {
+ fn drop(&mut self) {
+ unsafe {
+ puts("Inner got dropped!\0" as *const str as *const i8);
+ }
+ }
+}
+
+impl SomeTrait for NoisyDrop {
+ fn object_safe(&self) {}
+}
+
+enum Ordering {
+ Less = -1,
+ Equal = 0,
+ Greater = 1,
+}
+
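+// Entry-point shim: the `start` lang item receives `main`, optionally echoes the
+// program arguments, and turns the `Termination` result into the process exit code.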
+#[lang = "start"]
+fn start<T: Termination + 'static>(
+ main: fn() -> T,
+ argc: isize,
+ argv: *const *const u8,
+) -> isize {
+ if argc == 3 {
+ unsafe { puts(*argv as *const i8); }
+ unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const i8)); }
+ unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const i8)); }
+ }
+
+ main().report() as isize
+}
+
+static mut NUM: u8 = 6 * 7;
+static NUM_REF: &'static u8 = unsafe { &NUM };
+
+
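+// Minimal stand-in for `mem::zeroed`: zero the bytes of an uninitialized `MaybeUninit`
+// and read the value out.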
+unsafe fn zeroed<T>() -> T {
+ let mut uninit = MaybeUninit { uninit: () };
+ intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1);
+ uninit.value.value
+}
+
+fn take_f32(_f: f32) {}
+fn take_unique(_u: Unique<()>) {}
+
+fn return_u128_pair() -> (u128, u128) {
+ (0, 0)
+}
+
+fn call_return_u128_pair() {
+ return_u128_pair();
+}
+
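+// A struct of eleven `bool` fields, passed by value to an extern "C" function below to
+// exercise C ABI handling of odd-sized aggregates.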
+#[repr(C)]
+pub struct bool_11 {
+ field0: bool,
+ field1: bool,
+ field2: bool,
+ field3: bool,
+ field4: bool,
+ field5: bool,
+ field6: bool,
+ field7: bool,
+ field8: bool,
+ field9: bool,
+ field10: bool,
+}
+
+extern "C" fn bool_struct_in_11(arg0: bool_11) {}
+
+#[allow(unreachable_code)] // FIXME false positive
+fn main() {
+ take_unique(Unique {
+ pointer: unsafe { NonNull(1 as *mut ()) },
+ _marker: PhantomData,
+ });
+ take_f32(0.1);
+
+ call_return_u128_pair();
+
+ bool_struct_in_11(bool_11 {
+ field0: true,
+ field1: true,
+ field2: true,
+ field3: true,
+ field4: true,
+ field5: true,
+ field6: true,
+ field7: true,
+ field8: true,
+ field9: true,
+ field10: true,
+ });
+
+ let slice = &[0, 1] as &[i32];
+ let slice_ptr = slice as *const [i32] as *const i32;
+
+ assert_eq!(slice_ptr as usize % 4, 0);
+
+ //return;
+
+ unsafe {
+ printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8);
+
+ let hello: &[u8] = b"Hello\0" as &[u8; 6];
+ let ptr: *const i8 = hello as *const [u8] as *const i8;
+ puts(ptr);
+
+ let world: Box<&str> = box "World!\0";
+ puts(*world as *const str as *const i8);
+ world as Box<dyn SomeTrait>;
+
+ assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
+
+ assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
+ assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);
+ assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32);
+ assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64);
+
+ assert_eq!(intrinsics::size_of_val(hello) as u8, 6);
+
+ let chars = &['C', 'h', 'a', 'r', 's'];
+ let chars = chars as &[char];
+ assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5);
+
+ let a: &dyn SomeTrait = &"abc\0";
+ a.object_safe();
+
+ assert_eq!(intrinsics::size_of_val(a) as u8, 16);
+ assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4);
+
+ assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2);
+ assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8);
+
+ assert!(!intrinsics::needs_drop::<u8>());
+ assert!(!intrinsics::needs_drop::<[u8]>());
+ assert!(intrinsics::needs_drop::<NoisyDrop>());
+ assert!(intrinsics::needs_drop::<NoisyDropUnsized>());
+
+ Unique {
+ pointer: NonNull(1 as *mut &str),
+ _marker: PhantomData,
+ } as Unique<dyn SomeTrait>;
+
+ struct MyDst<T: ?Sized>(T);
+
+ intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>);
+
+ struct Foo {
+ x: u8,
+ y: !,
+ }
+
+ unsafe fn uninitialized<T>() -> T {
+ MaybeUninit { uninit: () }.value.value
+ }
+
+ zeroed::<(u8, u8)>();
+ #[allow(unreachable_code)]
+ {
+ if false {
+ zeroed::<!>();
+ zeroed::<Foo>();
+ uninitialized::<Foo>();
+ }
+ }
+ }
+
+ let _ = box NoisyDrop {
+ text: "Boxed outer got dropped!\0",
+ inner: NoisyDropInner,
+ } as Box<dyn SomeTrait>;
+
+ const FUNC_REF: Option<fn()> = Some(main);
+ match FUNC_REF {
+ Some(_) => {},
+ None => assert!(false),
+ }
+
+ match Ordering::Less {
+ Ordering::Less => {},
+ _ => assert!(false),
+ }
+
+ [NoisyDropInner, NoisyDropInner];
+
+ let x = &[0u32, 42u32] as &[u32];
+ match x {
+ [] => assert_eq!(0u32, 1),
+ [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize),
+ }
+
+ assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42);
+
+ #[cfg(not(any(jit, windows)))]
+ {
+ extern {
+ #[linkage = "extern_weak"]
+ static ABC: *const u8;
+ }
+
+ {
+ extern {
+ #[linkage = "extern_weak"]
+ static ABC: *const u8;
+ }
+ }
+
+ unsafe { assert_eq!(ABC as usize, 0); }
+ }
+
+ &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
+
+ let f = 1000.0;
+ assert_eq!(f as u8, 255);
+ let f2 = -1000.0;
+ assert_eq!(f2 as i8, -128);
+ assert_eq!(f2 as u8, 0);
+
+ let amount = 0;
+ assert_eq!(1u128 << amount, 1);
+
+ static ANOTHER_STATIC: &u8 = &A_STATIC;
+ assert_eq!(*ANOTHER_STATIC, 42);
+
+ check_niche_behavior();
+
+ extern "C" {
+ type ExternType;
+ }
+
+ struct ExternTypeWrapper {
+ _a: ExternType,
+ }
+
+ let nullptr = 0 as *const ();
+ let extern_nullptr = nullptr as *const ExternTypeWrapper;
+ extern_nullptr as *const ();
+ let slice_ptr = &[] as *const [u8];
+ slice_ptr as *const u8;
+
+ let repeat = [Some(42); 2];
+ assert_eq!(repeat[0], Some(42));
+ assert_eq!(repeat[1], Some(42));
+
+ from_decimal_string();
+
+ #[cfg(not(any(jit, windows)))]
+ test_tls();
+
+ #[cfg(all(not(jit), target_arch = "x86_64", target_os = "linux"))]
+ unsafe {
+ global_asm_test();
+ }
+
+ // Both statics have a reference that points to the same anonymous allocation.
+ static REF1: &u8 = &42;
+ static REF2: &u8 = REF1;
+ assert_eq!(*REF1, *REF2);
+
+ extern "C" {
+ type A;
+ }
+
+    let x: &A = unsafe { &*(1usize as *const A) };
+
+    assert_eq!(unsafe { intrinsics::size_of_val(x) }, 0);
+    assert_eq!(unsafe { intrinsics::min_align_of_val(x) }, 1);
+}
+
+#[cfg(all(not(jit), target_arch = "x86_64", target_os = "linux"))]
+extern "C" {
+ fn global_asm_test();
+}
+
+#[cfg(all(not(jit), target_arch = "x86_64", target_os = "linux"))]
+global_asm! {
+ "
+ .global global_asm_test
+ global_asm_test:
+ // comment that would normally be removed by LLVM
+ ret
+ "
+}
+
+#[repr(C)]
+enum c_void {
+ _1,
+ _2,
+}
+
+type c_int = i32;
+type c_ulong = u64;
+
+type pthread_t = c_ulong;
+
+#[repr(C)]
+struct pthread_attr_t {
+ __size: [u64; 7],
+}
+
+#[link(name = "pthread")]
+extern "C" {
+ fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
+
+ fn pthread_create(
+ native: *mut pthread_t,
+ attr: *const pthread_attr_t,
+ f: extern "C" fn(_: *mut c_void) -> *mut c_void,
+ value: *mut c_void
+ ) -> c_int;
+
+ fn pthread_join(
+ native: pthread_t,
+ value: *mut *mut c_void
+ ) -> c_int;
+}
+
+#[thread_local]
+#[cfg(not(jit))]
+static mut TLS: u8 = 42;
+
+#[cfg(not(jit))]
+extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
+ unsafe { TLS = 0; }
+ 0 as *mut c_void
+}
+
+#[cfg(not(jit))]
+fn test_tls() {
+ unsafe {
+ let mut attr: pthread_attr_t = zeroed();
+ let mut thread: pthread_t = 0;
+
+ assert_eq!(TLS, 42);
+
+ if pthread_attr_init(&mut attr) != 0 {
+ assert!(false);
+ }
+
+ if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 {
+ assert!(false);
+ }
+
+ let mut res = 0 as *mut c_void;
+ pthread_join(thread, &mut res);
+
+ // TLS of main thread must not have been changed by the other thread.
+ assert_eq!(TLS, 42);
+
+ puts("TLS works!\n\0" as *const str as *const i8);
+ }
+}
+
+// Copied ui/issues/issue-61696.rs
+
+pub enum Infallible {}
+
+// The check that the `bool` field of `V1` is encoding a "niche variant"
+// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect,
+// causing valid `V1` values to be interpreted as other variants.
+pub enum E1 {
+ V1 { f: bool },
+ V2 { f: Infallible },
+ V3,
+ V4,
+}
+
+// Computing the discriminant used to be done using the niche type (here `u8`,
+// from the `bool` field of `V1`), overflowing for variants with large enough
+// indices (`V3` and `V4`), causing them to be interpreted as other variants.
+pub enum E2<X> {
+ V1 { f: bool },
+
+ /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X),
+ _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X),
+ _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X),
+ _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X),
+ _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X),
+ _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X),
+ _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X),
+ _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X),
+ _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X),
+ _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X),
+ _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X),
+ _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X),
+ _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X),
+ _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X),
+ _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X),
+ _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X),
+ _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X),
+ _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X),
+ _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X),
+ _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X),
+ _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X),
+ _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X),
+ _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X),
+ _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X),
+ _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X),
+ _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X),
+ _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X),
+ _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X),
+ _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X),
+ _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X),
+ _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X),
+ _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X),
+
+ V3,
+ V4,
+}
+
+fn check_niche_behavior() {
+ if let E1::V2 { .. } = (E1::V1 { f: true }) {
+ intrinsics::abort();
+ }
+
+ if let E2::V1 { .. } = E2::V3::<Infallible> {
+ intrinsics::abort();
+ }
+}
+
+fn from_decimal_string() {
+ loop {
+ let multiplier = 1;
+
+ take_multiplier_ref(&multiplier);
+
+ if multiplier == 1 {
+ break;
+ }
+
+ unreachable();
+ }
+}
+
+fn take_multiplier_ref(_multiplier: &u128) {}
+
+fn unreachable() -> ! {
+ panic("unreachable")
+}
diff --git a/compiler/rustc_codegen_cranelift/example/mod_bench.rs b/compiler/rustc_codegen_cranelift/example/mod_bench.rs
new file mode 100644
index 000000000..e3e8a3c2d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/mod_bench.rs
@@ -0,0 +1,36 @@
+#![feature(start, core_intrinsics, lang_items)]
+#![no_std]
+
+#[cfg_attr(unix, link(name = "c"))]
+#[cfg_attr(target_env = "msvc", link(name = "msvcrt"))]
+extern {}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+ core::intrinsics::abort();
+}
+
+#[lang = "eh_personality"]
+fn eh_personality() {}
+
+// Required for rustc_codegen_llvm
+#[no_mangle]
+unsafe extern "C" fn _Unwind_Resume() {
+ core::intrinsics::unreachable();
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ for i in 2..10_000_000 {
+ black_box((i + 1) % i);
+ }
+
+ 0
+}
+
+#[inline(never)]
+fn black_box(i: u32) {
+ if i != 1 {
+ core::intrinsics::abort();
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/example/std_example.rs b/compiler/rustc_codegen_cranelift/example/std_example.rs
new file mode 100644
index 000000000..0b5b6cd55
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/std_example.rs
@@ -0,0 +1,356 @@
+#![feature(core_intrinsics, generators, generator_trait, is_sorted, bench_black_box)]
+
+#[cfg(target_arch = "x86_64")]
+use std::arch::x86_64::*;
+use std::hint::black_box;
+use std::io::Write;
+use std::ops::Generator;
+
+fn main() {
+ println!("{:?}", std::env::args().collect::<Vec<_>>());
+
+ let mutex = std::sync::Mutex::new(());
+ let _guard = mutex.lock().unwrap();
+
+ let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>();
+ let stderr = ::std::io::stderr();
+ let mut stderr = stderr.lock();
+
+ std::thread::spawn(move || {
+ println!("Hello from another thread!");
+ });
+
+ writeln!(stderr, "some {} text", "<unknown>").unwrap();
+
+ let _ = std::process::Command::new("true").env("c", "d").spawn();
+
+ println!("cargo:rustc-link-lib=z");
+
+ static ONCE: std::sync::Once = std::sync::Once::new();
+ ONCE.call_once(|| {});
+
+ let _eq = LoopState::Continue(()) == LoopState::Break(());
+
+ // Make sure ByValPair values with differently sized components are correctly passed
+ map(None::<(u8, Box<Instruction>)>);
+
+ println!("{}", 2.3f32.exp());
+ println!("{}", 2.3f32.exp2());
+ println!("{}", 2.3f32.abs());
+ println!("{}", 2.3f32.sqrt());
+ println!("{}", 2.3f32.floor());
+ println!("{}", 2.3f32.ceil());
+ println!("{}", 2.3f32.min(1.0));
+ println!("{}", 2.3f32.max(1.0));
+ println!("{}", 2.3f32.powi(2));
+ println!("{}", 2.3f32.log2());
+ assert_eq!(2.3f32.copysign(-1.0), -2.3f32);
+ println!("{}", 2.3f32.powf(2.0));
+
+ assert_eq!(i64::MAX.checked_mul(2), None);
+
+ assert_eq!(-128i8, (-128i8).saturating_sub(1));
+ assert_eq!(127i8, 127i8.saturating_sub(-128));
+ assert_eq!(-128i8, (-128i8).saturating_add(-128));
+ assert_eq!(127i8, 127i8.saturating_add(1));
+
+ assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
+ assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
+ assert_eq!(core::intrinsics::saturating_sub(0, -170141183460469231731687303715884105728i128), 170141183460469231731687303715884105727i128);
+
+ let _d = 0i128.checked_div(2i128);
+ let _d = 0u128.checked_div(2u128);
+ assert_eq!(1u128 + 2, 3);
+
+ assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128);
+ assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128);
+ assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128);
+
+ let tmp = 353985398u128;
+ assert_eq!(tmp * 932490u128, 330087843781020u128);
+
+ let tmp = -0x1234_5678_9ABC_DEF0i64;
+ assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128);
+
+ // Check that all u/i128 <-> float casts work correctly.
+ let houndred_u128 = 100u128;
+ let houndred_i128 = 100i128;
+ let houndred_f32 = 100.0f32;
+ let houndred_f64 = 100.0f64;
+ assert_eq!(houndred_u128 as f32, 100.0);
+ assert_eq!(houndred_u128 as f64, 100.0);
+ assert_eq!(houndred_f32 as u128, 100);
+ assert_eq!(houndred_f64 as u128, 100);
+ assert_eq!(houndred_i128 as f32, 100.0);
+ assert_eq!(houndred_i128 as f64, 100.0);
+ assert_eq!(houndred_f32 as i128, 100);
+ assert_eq!(houndred_f64 as i128, 100);
+ assert_eq!(1u128.rotate_left(2), 4);
+
+ assert_eq!(black_box(f32::NAN) as i128, 0);
+ assert_eq!(black_box(f32::NAN) as u128, 0);
+
+    // Test signed 128-bit comparisons
+ let max = usize::MAX as i128;
+ if 100i128 < 0i128 || 100i128 > max {
+ panic!();
+ }
+
+ test_checked_mul();
+
+ let _a = 1u32 << 2u8;
+
+ let empty: [i32; 0] = [];
+ assert!(empty.is_sorted());
+
+ println!("{:?}", std::intrinsics::caller_location());
+
+ #[cfg(target_arch = "x86_64")]
+ unsafe {
+ test_simd();
+ }
+
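+    // Smoke-test generators: pin a generator on the heap and resume it once.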
+ Box::pin(move |mut _task_context| {
+ yield ();
+ }).as_mut().resume(0);
+
+ #[derive(Copy, Clone)]
+ enum Nums {
+ NegOne = -1,
+ }
+
+ let kind = Nums::NegOne;
+ assert_eq!(-1i128, kind as i128);
+
+ let options = [1u128];
+ match options[0] {
+ 1 => (),
+ 0 => loop {},
+ v => panic(v),
+ };
+
+ if black_box(false) {
+ // Based on https://github.com/rust-lang/rust/blob/2f320a224e827b400be25966755a621779f797cc/src/test/ui/debuginfo/debuginfo_with_uninhabitable_field_and_unsized.rs
+ let _ = Foo::<dyn Send>::new();
+
+ #[allow(dead_code)]
+ struct Foo<T: ?Sized> {
+ base: Never,
+ value: T,
+ }
+
+ impl<T: ?Sized> Foo<T> {
+ pub fn new() -> Box<Foo<T>> {
+ todo!()
+ }
+ }
+
+ enum Never {}
+ }
+}
+
+fn panic(_: u128) {
+ panic!();
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_simd() {
+ assert!(is_x86_feature_detected!("sse2"));
+
+ let x = _mm_setzero_si128();
+ let y = _mm_set1_epi16(7);
+ let or = _mm_or_si128(x, y);
+ let cmp_eq = _mm_cmpeq_epi8(y, y);
+ let cmp_lt = _mm_cmplt_epi8(y, y);
+
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]);
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]);
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]);
+
+ test_mm_slli_si128();
+ test_mm_movemask_epi8();
+ test_mm256_movemask_epi8();
+ test_mm_add_epi8();
+ test_mm_add_pd();
+ test_mm_cvtepi8_epi16();
+ test_mm_cvtsi128_si64();
+
+ test_mm_extract_epi8();
+ test_mm_insert_epi16();
+
+ let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));
+ assert_eq!(mask1, 1);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_slli_si128() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 1);
+ let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ assert_eq_m128i(r, e);
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 15);
+ let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);
+ assert_eq_m128i(r, e);
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 16);
+ assert_eq_m128i(r, _mm_set1_epi8(0));
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_movemask_epi8() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01,
+ 0b0101, 0b1111_0000u8 as i8, 0, 0,
+ 0, 0, 0b1111_0000u8 as i8, 0b0101,
+ 0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8,
+ );
+ let r = _mm_movemask_epi8(a);
+ assert_eq!(r, 0b10100100_00100101);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "avx2")]
+unsafe fn test_mm256_movemask_epi8() {
+ let a = _mm256_set1_epi8(-1);
+ let r = _mm256_movemask_epi8(a);
+ let e = -1;
+ assert_eq!(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_epi8() {
+ let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ #[rustfmt::skip]
+ let b = _mm_setr_epi8(
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ );
+ let r = _mm_add_epi8(a, b);
+ #[rustfmt::skip]
+ let e = _mm_setr_epi8(
+ 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46,
+ );
+ assert_eq_m128i(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_pd() {
+ let a = _mm_setr_pd(1.0, 2.0);
+ let b = _mm_setr_pd(5.0, 10.0);
+ let r = _mm_add_pd(a, b);
+ assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));
+}
+
+#[cfg(target_arch = "x86_64")]
+fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {
+ unsafe {
+ assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));
+ }
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
+ if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
+ panic!("{:?} != {:?}", a, b);
+ }
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_cvtsi128_si64() {
+ let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));
+ assert_eq!(r, 5);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_cvtepi8_epi16() {
+ let a = _mm_set1_epi8(10);
+ let r = _mm_cvtepi8_epi16(a);
+ let e = _mm_set1_epi16(10);
+ assert_eq_m128i(r, e);
+ let a = _mm_set1_epi8(-10);
+ let r = _mm_cvtepi8_epi16(a);
+ let e = _mm_set1_epi16(-10);
+ assert_eq_m128i(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_extract_epi8() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ -1, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15
+ );
+ let r1 = _mm_extract_epi8(a, 0);
+ let r2 = _mm_extract_epi8(a, 3);
+ assert_eq!(r1, 0xFF);
+ assert_eq!(r2, 3);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_insert_epi16() {
+ let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
+ let r = _mm_insert_epi16(a, 9, 0);
+ let e = _mm_setr_epi16(9, 1, 2, 3, 4, 5, 6, 7);
+ assert_eq_m128i(r, e);
+}
+
+fn test_checked_mul() {
+ let u: Option<u8> = u8::from_str_radix("1000", 10).ok();
+ assert_eq!(u, None);
+
+ assert_eq!(1u8.checked_mul(255u8), Some(255u8));
+ assert_eq!(255u8.checked_mul(255u8), None);
+ assert_eq!(1i8.checked_mul(127i8), Some(127i8));
+ assert_eq!(127i8.checked_mul(127i8), None);
+ assert_eq!((-1i8).checked_mul(-127i8), Some(127i8));
+ assert_eq!(1i8.checked_mul(-128i8), Some(-128i8));
+ assert_eq!((-128i8).checked_mul(-128i8), None);
+
+ assert_eq!(1u64.checked_mul(u64::MAX), Some(u64::MAX));
+ assert_eq!(u64::MAX.checked_mul(u64::MAX), None);
+ assert_eq!(1i64.checked_mul(i64::MAX), Some(i64::MAX));
+ assert_eq!(i64::MAX.checked_mul(i64::MAX), None);
+ assert_eq!((-1i64).checked_mul(i64::MIN + 1), Some(i64::MAX));
+ assert_eq!(1i64.checked_mul(i64::MIN), Some(i64::MIN));
+ assert_eq!(i64::MIN.checked_mul(i64::MIN), None);
+}
+
+#[derive(PartialEq)]
+enum LoopState {
+ Continue(()),
+ Break(())
+}
+
+pub enum Instruction {
+ Increment,
+ Loop,
+}
+
+fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> {
+ match a {
+ None => None,
+ Some((_, instr)) => Some(instr),
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs b/compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs
new file mode 100644
index 000000000..2cb84786f
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs
@@ -0,0 +1,97 @@
+// Based on https://github.com/rust-lang/rust/blob/c5840f9d252c2f5cc16698dbf385a29c5de3ca07/src/test/ui/array-slice-vec/subslice-patterns-const-eval-match.rs
+
+// Test that array subslice patterns are correctly handled in const evaluation.
+
+// run-pass
+
+#[derive(PartialEq, Debug, Clone)]
+struct N(u8);
+
+#[derive(PartialEq, Debug, Clone)]
+struct Z;
+
+macro_rules! n {
+ ($($e:expr),* $(,)?) => {
+ [$(N($e)),*]
+ }
+}
+
+// This macro has an unused variable so that it can be repeated based on the
+// number of times a repeated variable (`$e` in `z`) occurs.
+macro_rules! zed {
+ ($e:expr) => { Z }
+}
+
+macro_rules! z {
+ ($($e:expr),* $(,)?) => {
+ [$(zed!($e)),*]
+ }
+}
+
+// Compare constant evaluation and runtime evaluation of a given expression.
+macro_rules! compare_evaluation {
+ ($e:expr, $t:ty $(,)?) => {{
+ const CONST_EVAL: $t = $e;
+ const fn const_eval() -> $t { $e }
+ static CONST_EVAL2: $t = const_eval();
+ let runtime_eval = $e;
+ assert_eq!(CONST_EVAL, runtime_eval);
+ assert_eq!(CONST_EVAL2, runtime_eval);
+ }}
+}
+
+// Repeat `$test`, substituting the given macro variables with the given
+// identifiers.
+//
+// For example:
+//
+// repeat! {
+// ($name); X; Y:
+// struct $name;
+// }
+//
+// Expands to:
+//
+// struct X; struct Y;
+//
+// This is used to repeat the tests using both the `N` and `Z`
+// types.
+macro_rules! repeat {
+ (($($dollar:tt $placeholder:ident)*); $($($values:ident),+);*: $($test:tt)*) => {
+ macro_rules! single {
+ ($($dollar $placeholder:ident),*) => { $($test)* }
+ }
+ $(single!($($values),+);)*
+ }
+}
+
+fn main() {
+ repeat! {
+ ($arr $Ty); n, N; z, Z:
+ compare_evaluation!({ let [_, x @ .., _] = $arr!(1, 2, 3, 4); x }, [$Ty; 2]);
+ compare_evaluation!({ let [_, ref x @ .., _] = $arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
+ compare_evaluation!({ let [_, x @ .., _] = &$arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
+
+ compare_evaluation!({ let [_, _, x @ .., _, _] = $arr!(1, 2, 3, 4); x }, [$Ty; 0]);
+ compare_evaluation!(
+ { let [_, _, ref x @ .., _, _] = $arr!(1, 2, 3, 4); x },
+ &'static [$Ty; 0],
+ );
+ compare_evaluation!(
+ { let [_, _, x @ .., _, _] = &$arr!(1, 2, 3, 4); x },
+ &'static [$Ty; 0],
+ );
+
+ compare_evaluation!({ let [_, .., x] = $arr!(1, 2, 3, 4); x }, $Ty);
+ compare_evaluation!({ let [_, .., ref x] = $arr!(1, 2, 3, 4); x }, &'static $Ty);
+ compare_evaluation!({ let [_, _y @ .., x] = &$arr!(1, 2, 3, 4); x }, &'static $Ty);
+ }
+
+ compare_evaluation!({ let [_, .., N(x)] = n!(1, 2, 3, 4); x }, u8);
+ compare_evaluation!({ let [_, .., N(ref x)] = n!(1, 2, 3, 4); x }, &'static u8);
+ compare_evaluation!({ let [_, .., N(x)] = &n!(1, 2, 3, 4); x }, &'static u8);
+
+ compare_evaluation!({ let [N(x), .., _] = n!(1, 2, 3, 4); x }, u8);
+ compare_evaluation!({ let [N(ref x), .., _] = n!(1, 2, 3, 4); x }, &'static u8);
+ compare_evaluation!({ let [N(x), .., _] = &n!(1, 2, 3, 4); x }, &'static u8);
+}
diff --git a/compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs b/compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs
new file mode 100644
index 000000000..93bab17e4
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs
@@ -0,0 +1,40 @@
+// Based on https://github.com/anp/rust/blob/175631311716d7dfeceec40d2587cde7142ffa8c/src/test/ui/rfc-2091-track-caller/track-caller-attribute.rs
+
+// run-pass
+
+use std::panic::Location;
+
+#[track_caller]
+fn tracked() -> &'static Location<'static> {
+ Location::caller()
+}
+
+fn nested_intrinsic() -> &'static Location<'static> {
+ Location::caller()
+}
+
+fn nested_tracked() -> &'static Location<'static> {
+ tracked()
+}
+
+fn main() {
+ let location = Location::caller();
+ assert_eq!(location.file(), file!());
+ assert_eq!(location.line(), 21);
+ assert_eq!(location.column(), 20);
+
+ let tracked = tracked();
+ assert_eq!(tracked.file(), file!());
+ assert_eq!(tracked.line(), 26);
+ assert_eq!(tracked.column(), 19);
+
+ let nested = nested_intrinsic();
+ assert_eq!(nested.file(), file!());
+ assert_eq!(nested.line(), 13);
+ assert_eq!(nested.column(), 5);
+
+ let contained = nested_tracked();
+ assert_eq!(contained.file(), file!());
+ assert_eq!(contained.line(), 17);
+ assert_eq!(contained.column(), 5);
+}