authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-18 17:40:19 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-18 17:40:19 +0000
commit9f0fc191371843c4fc000a226b0a26b6c059aacd (patch)
tree35f8be3ef04506ac891ad001e8c41e535ae8d01d /rust
parentReleasing progress-linux version 6.6.15-2~progress7.99u1. (diff)
downloadlinux-9f0fc191371843c4fc000a226b0a26b6c059aacd.tar.xz
linux-9f0fc191371843c4fc000a226b0a26b6c059aacd.zip
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'rust')
-rw-r--r--  rust/Makefile                      10
-rw-r--r--  rust/alloc/alloc.rs                21
-rw-r--r--  rust/alloc/boxed.rs                56
-rw-r--r--  rust/alloc/lib.rs                  13
-rw-r--r--  rust/alloc/raw_vec.rs              30
-rw-r--r--  rust/alloc/vec/drain_filter.rs    199
-rw-r--r--  rust/alloc/vec/extract_if.rs      115
-rw-r--r--  rust/alloc/vec/mod.rs             110
-rw-r--r--  rust/alloc/vec/spec_extend.rs       8
-rw-r--r--  rust/bindings/bindings_helper.h     1
-rw-r--r--  rust/compiler_builtins.rs           1
-rw-r--r--  rust/helpers.c                     13
-rw-r--r--  rust/kernel/init.rs                20
-rw-r--r--  rust/kernel/lib.rs                  3
-rw-r--r--  rust/kernel/print.rs                1
-rw-r--r--  rust/kernel/sync/arc.rs            48
-rw-r--r--  rust/kernel/sync/condvar.rs         1
-rw-r--r--  rust/kernel/sync/lock.rs            1
-rw-r--r--  rust/kernel/task.rs                 2
-rw-r--r--  rust/kernel/workqueue.rs          679
20 files changed, 997 insertions(+), 335 deletions(-)
diff --git a/rust/Makefile b/rust/Makefile
index 7dbf9abe0d..543b37f6c7 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -336,13 +336,13 @@ quiet_cmd_bindgen = BINDGEN $@
$(bindgen_target_cflags) $(bindgen_target_extra)
$(obj)/bindings/bindings_generated.rs: private bindgen_target_flags = \
- $(shell grep -v '^#\|^$$' $(srctree)/$(src)/bindgen_parameters)
+ $(shell grep -Ev '^#|^$$' $(srctree)/$(src)/bindgen_parameters)
$(obj)/bindings/bindings_generated.rs: $(src)/bindings/bindings_helper.h \
$(src)/bindgen_parameters FORCE
$(call if_changed_dep,bindgen)
$(obj)/uapi/uapi_generated.rs: private bindgen_target_flags = \
- $(shell grep -v '^#\|^$$' $(srctree)/$(src)/bindgen_parameters)
+ $(shell grep -Ev '^#|^$$' $(srctree)/$(src)/bindgen_parameters)
$(obj)/uapi/uapi_generated.rs: $(src)/uapi/uapi_helper.h \
$(src)/bindgen_parameters FORCE
$(call if_changed_dep,bindgen)
@@ -364,9 +364,7 @@ $(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers.c FORCE
quiet_cmd_exports = EXPORTS $@
cmd_exports = \
$(NM) -p --defined-only $< \
- | grep -E ' (T|R|D) ' | cut -d ' ' -f 3 \
- | xargs -Isymbol \
- echo 'EXPORT_SYMBOL_RUST_GPL(symbol);' > $@
+ | awk '/ (T|R|D) / {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
$(obj)/exports_core_generated.h: $(obj)/core.o FORCE
$(call if_changed,exports)
@@ -383,6 +381,8 @@ $(obj)/exports_kernel_generated.h: $(obj)/kernel.o FORCE
quiet_cmd_rustc_procmacro = $(RUSTC_OR_CLIPPY_QUIET) P $@
cmd_rustc_procmacro = \
$(RUSTC_OR_CLIPPY) $(rust_common_flags) \
+ -Clinker-flavor=gcc -Clinker=$(HOSTCC) \
+ -Clink-args='$(call escsq,$(KBUILD_HOSTLDFLAGS))' \
--emit=dep-info=$(depfile) --emit=link=$@ --extern proc_macro \
--crate-type proc-macro \
--crate-name $(patsubst lib%.so,%,$(notdir $@)) $<
diff --git a/rust/alloc/alloc.rs b/rust/alloc/alloc.rs
index 0b6bf5b6da..8cb4a31cf6 100644
--- a/rust/alloc/alloc.rs
+++ b/rust/alloc/alloc.rs
@@ -6,9 +6,7 @@
#[cfg(not(test))]
use core::intrinsics;
-use core::intrinsics::{min_align_of_val, size_of_val};
-use core::ptr::Unique;
#[cfg(not(test))]
use core::ptr::{self, NonNull};
@@ -40,7 +38,6 @@ extern "Rust" {
#[rustc_nounwind]
fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
- #[cfg(not(bootstrap))]
static __rust_no_alloc_shim_is_unstable: u8;
}
@@ -98,7 +95,6 @@ pub unsafe fn alloc(layout: Layout) -> *mut u8 {
unsafe {
// Make sure we don't accidentally allow omitting the allocator shim in
// stable code until it is actually stabilized.
- #[cfg(not(bootstrap))]
core::ptr::read_volatile(&__rust_no_alloc_shim_is_unstable);
__rust_alloc(layout.size(), layout.align())
@@ -339,22 +335,6 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
}
}
-#[cfg_attr(not(test), lang = "box_free")]
-#[inline]
-// This signature has to be the same as `Box`, otherwise an ICE will happen.
-// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
-// well.
-// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
-// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
-pub(crate) unsafe fn box_free<T: ?Sized, A: Allocator>(ptr: Unique<T>, alloc: A) {
- unsafe {
- let size = size_of_val(ptr.as_ref());
- let align = min_align_of_val(ptr.as_ref());
- let layout = Layout::from_size_align_unchecked(size, align);
- alloc.deallocate(From::from(ptr.cast()), layout)
- }
-}
-
// # Allocation error handler
#[cfg(not(no_global_oom_handling))]
@@ -414,7 +394,6 @@ pub mod __alloc_error_handler {
static __rust_alloc_error_handler_should_panic: u8;
}
- #[allow(unused_unsafe)]
if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
panic!("memory allocation of {size} bytes failed")
} else {
diff --git a/rust/alloc/boxed.rs b/rust/alloc/boxed.rs
index c8173cea83..9620eba172 100644
--- a/rust/alloc/boxed.rs
+++ b/rust/alloc/boxed.rs
@@ -159,12 +159,12 @@ use core::hash::{Hash, Hasher};
use core::iter::FusedIterator;
use core::marker::Tuple;
use core::marker::Unsize;
-use core::mem;
+use core::mem::{self, SizedTypeProperties};
use core::ops::{
CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
};
use core::pin::Pin;
-use core::ptr::{self, Unique};
+use core::ptr::{self, NonNull, Unique};
use core::task::{Context, Poll};
#[cfg(not(no_global_oom_handling))]
@@ -483,8 +483,12 @@ impl<T, A: Allocator> Box<T, A> {
where
A: Allocator,
{
- let layout = Layout::new::<mem::MaybeUninit<T>>();
- let ptr = alloc.allocate(layout)?.cast();
+ let ptr = if T::IS_ZST {
+ NonNull::dangling()
+ } else {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ alloc.allocate(layout)?.cast()
+ };
unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
}
@@ -553,8 +557,12 @@ impl<T, A: Allocator> Box<T, A> {
where
A: Allocator,
{
- let layout = Layout::new::<mem::MaybeUninit<T>>();
- let ptr = alloc.allocate_zeroed(layout)?.cast();
+ let ptr = if T::IS_ZST {
+ NonNull::dangling()
+ } else {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ alloc.allocate_zeroed(layout)?.cast()
+ };
unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
}
@@ -679,14 +687,16 @@ impl<T> Box<[T]> {
#[unstable(feature = "allocator_api", issue = "32838")]
#[inline]
pub fn try_new_uninit_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
- unsafe {
+ let ptr = if T::IS_ZST || len == 0 {
+ NonNull::dangling()
+ } else {
let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
Ok(l) => l,
Err(_) => return Err(AllocError),
};
- let ptr = Global.allocate(layout)?;
- Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len))
- }
+ Global.allocate(layout)?.cast()
+ };
+ unsafe { Ok(RawVec::from_raw_parts_in(ptr.as_ptr(), len, Global).into_box(len)) }
}
/// Constructs a new boxed slice with uninitialized contents, with the memory
@@ -711,14 +721,16 @@ impl<T> Box<[T]> {
#[unstable(feature = "allocator_api", issue = "32838")]
#[inline]
pub fn try_new_zeroed_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
- unsafe {
+ let ptr = if T::IS_ZST || len == 0 {
+ NonNull::dangling()
+ } else {
let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
Ok(l) => l,
Err(_) => return Err(AllocError),
};
- let ptr = Global.allocate_zeroed(layout)?;
- Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len))
- }
+ Global.allocate_zeroed(layout)?.cast()
+ };
+ unsafe { Ok(RawVec::from_raw_parts_in(ptr.as_ptr(), len, Global).into_box(len)) }
}
}
@@ -1215,8 +1227,18 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Box<T, A> {
+ #[inline]
fn drop(&mut self) {
- // FIXME: Do nothing, drop is currently performed by compiler.
+ // the T in the Box is dropped by the compiler before the destructor is run
+
+ let ptr = self.0;
+
+ unsafe {
+ let layout = Layout::for_value_raw(ptr.as_ptr());
+ if layout.size() != 0 {
+ self.1.deallocate(From::from(ptr.cast()), layout);
+ }
+ }
}
}
@@ -2165,7 +2187,7 @@ impl dyn Error + Send {
let err: Box<dyn Error> = self;
<dyn Error>::downcast(err).map_err(|s| unsafe {
// Reapply the `Send` marker.
- mem::transmute::<Box<dyn Error>, Box<dyn Error + Send>>(s)
+ Box::from_raw(Box::into_raw(s) as *mut (dyn Error + Send))
})
}
}
@@ -2179,7 +2201,7 @@ impl dyn Error + Send + Sync {
let err: Box<dyn Error> = self;
<dyn Error>::downcast(err).map_err(|s| unsafe {
// Reapply the `Send + Sync` marker.
- mem::transmute::<Box<dyn Error>, Box<dyn Error + Send + Sync>>(s)
+ Box::from_raw(Box::into_raw(s) as *mut (dyn Error + Send + Sync))
})
}
}
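
The two downcast hunks above replace a whole-`Box` `mem::transmute` with an `into_raw`/`from_raw` round trip. A minimal userspace sketch of the same pattern, assuming the erased value really was `Send` before the marker was lost:

    use std::error::Error;

    // Re-add an auto-trait marker by casting the raw pointer instead of
    // transmuting the Box: the cast leaves the data pointer and vtable
    // untouched and only widens the trait-object type.
    fn reapply_send(b: Box<dyn Error>) -> Box<dyn Error + Send> {
        // SAFETY: assumes the value was `Send` when the Box was created,
        // so restoring the marker is sound.
        unsafe { Box::from_raw(Box::into_raw(b) as *mut (dyn Error + Send)) }
    }
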
diff --git a/rust/alloc/lib.rs b/rust/alloc/lib.rs
index 85e91356ec..73b9ffd845 100644
--- a/rust/alloc/lib.rs
+++ b/rust/alloc/lib.rs
@@ -58,6 +58,11 @@
//! [`Rc`]: rc
//! [`RefCell`]: core::cell
+// To run alloc tests without x.py without ending up with two copies of alloc, Miri needs to be
+// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
+// rustc itself never sets the feature, so this line has no effect there.
+#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
+//
#![allow(unused_attributes)]
#![stable(feature = "alloc", since = "1.36.0")]
#![doc(
@@ -77,11 +82,6 @@
))]
#![no_std]
#![needs_allocator]
-// To run alloc tests without x.py without ending up with two copies of alloc, Miri needs to be
-// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
-// rustc itself never sets the feature, so this line has no affect there.
-#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
-//
// Lints:
#![deny(unsafe_op_in_unsafe_fn)]
#![deny(fuzzy_provenance_casts)]
@@ -90,6 +90,8 @@
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
#![warn(multiple_supertrait_upcastable)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![cfg_attr(not(bootstrap), allow(rustdoc::redundant_explicit_links))]
//
// Library features:
// tidy-alphabetical-start
@@ -139,7 +141,6 @@
#![feature(maybe_uninit_uninit_array_transpose)]
#![feature(pattern)]
#![feature(pointer_byte_offsets)]
-#![feature(provide_any)]
#![feature(ptr_internals)]
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
diff --git a/rust/alloc/raw_vec.rs b/rust/alloc/raw_vec.rs
index 65d5ce1582..a7425582a3 100644
--- a/rust/alloc/raw_vec.rs
+++ b/rust/alloc/raw_vec.rs
@@ -471,16 +471,26 @@ impl<T, A: Allocator> RawVec<T, A> {
let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
// See current_memory() why this assert is here
let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
- let ptr = unsafe {
- // `Layout::array` cannot overflow here because it would have
- // overflowed earlier when capacity was larger.
- let new_size = mem::size_of::<T>().unchecked_mul(cap);
- let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
- self.alloc
- .shrink(ptr, layout, new_layout)
- .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
- };
- self.set_ptr_and_cap(ptr, cap);
+
+ // If shrinking to 0, deallocate the buffer. We don't reach this point
+ // for the T::IS_ZST case since current_memory() will have returned
+ // None.
+ if cap == 0 {
+ unsafe { self.alloc.deallocate(ptr, layout) };
+ self.ptr = Unique::dangling();
+ self.cap = 0;
+ } else {
+ let ptr = unsafe {
+ // `Layout::array` cannot overflow here because it would have
+ // overflowed earlier when capacity was larger.
+ let new_size = mem::size_of::<T>().unchecked_mul(cap);
+ let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+ self.alloc
+ .shrink(ptr, layout, new_layout)
+ .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
+ };
+ self.set_ptr_and_cap(ptr, cap);
+ }
Ok(())
}
}
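
With this change, a shrink to capacity 0 deallocates the buffer outright instead of calling `Allocator::shrink` with a zero-size target layout. A sketch of the effect as seen through the public `Vec` API (behaviour inferred from the hunk above):

    // len == 0, so shrink_to_fit() requests capacity 0 and now takes the
    // deallocate path rather than a zero-size shrink() call.
    let mut v: Vec<u32> = Vec::with_capacity(16);
    assert!(v.capacity() >= 16);
    v.shrink_to_fit();
    assert_eq!(v.capacity(), 0);
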
diff --git a/rust/alloc/vec/drain_filter.rs b/rust/alloc/vec/drain_filter.rs
deleted file mode 100644
index 09efff090e..0000000000
--- a/rust/alloc/vec/drain_filter.rs
+++ /dev/null
@@ -1,199 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-use crate::alloc::{Allocator, Global};
-use core::mem::{ManuallyDrop, SizedTypeProperties};
-use core::ptr;
-use core::slice;
-
-use super::Vec;
-
-/// An iterator which uses a closure to determine if an element should be removed.
-///
-/// This struct is created by [`Vec::drain_filter`].
-/// See its documentation for more.
-///
-/// # Example
-///
-/// ```
-/// #![feature(drain_filter)]
-///
-/// let mut v = vec![0, 1, 2];
-/// let iter: std::vec::DrainFilter<'_, _, _> = v.drain_filter(|x| *x % 2 == 0);
-/// ```
-#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
-#[derive(Debug)]
-pub struct DrainFilter<
- 'a,
- T,
- F,
- #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
-> where
- F: FnMut(&mut T) -> bool,
-{
- pub(super) vec: &'a mut Vec<T, A>,
- /// The index of the item that will be inspected by the next call to `next`.
- pub(super) idx: usize,
- /// The number of items that have been drained (removed) thus far.
- pub(super) del: usize,
- /// The original length of `vec` prior to draining.
- pub(super) old_len: usize,
- /// The filter test predicate.
- pub(super) pred: F,
- /// A flag that indicates a panic has occurred in the filter test predicate.
- /// This is used as a hint in the drop implementation to prevent consumption
- /// of the remainder of the `DrainFilter`. Any unprocessed items will be
- /// backshifted in the `vec`, but no further items will be dropped or
- /// tested by the filter predicate.
- pub(super) panic_flag: bool,
-}
-
-impl<T, F, A: Allocator> DrainFilter<'_, T, F, A>
-where
- F: FnMut(&mut T) -> bool,
-{
- /// Returns a reference to the underlying allocator.
- #[unstable(feature = "allocator_api", issue = "32838")]
- #[inline]
- pub fn allocator(&self) -> &A {
- self.vec.allocator()
- }
-
- /// Keep unyielded elements in the source `Vec`.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(drain_filter)]
- /// #![feature(drain_keep_rest)]
- ///
- /// let mut vec = vec!['a', 'b', 'c'];
- /// let mut drain = vec.drain_filter(|_| true);
- ///
- /// assert_eq!(drain.next().unwrap(), 'a');
- ///
- /// // This call keeps 'b' and 'c' in the vec.
- /// drain.keep_rest();
- ///
- /// // If we wouldn't call `keep_rest()`,
- /// // `vec` would be empty.
- /// assert_eq!(vec, ['b', 'c']);
- /// ```
- #[unstable(feature = "drain_keep_rest", issue = "101122")]
- pub fn keep_rest(self) {
- // At this moment layout looks like this:
- //
- // _____________________/-- old_len
- // / \
- // [kept] [yielded] [tail]
- // \_______/ ^-- idx
- // \-- del
- //
- // Normally `Drop` impl would drop [tail] (via .for_each(drop), ie still calling `pred`)
- //
- // 1. Move [tail] after [kept]
- // 2. Update length of the original vec to `old_len - del`
- // a. In case of ZST, this is the only thing we want to do
- // 3. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do
- let mut this = ManuallyDrop::new(self);
-
- unsafe {
- // ZSTs have no identity, so we don't need to move them around.
- if !T::IS_ZST && this.idx < this.old_len && this.del > 0 {
- let ptr = this.vec.as_mut_ptr();
- let src = ptr.add(this.idx);
- let dst = src.sub(this.del);
- let tail_len = this.old_len - this.idx;
- src.copy_to(dst, tail_len);
- }
-
- let new_len = this.old_len - this.del;
- this.vec.set_len(new_len);
- }
- }
-}
-
-#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
-impl<T, F, A: Allocator> Iterator for DrainFilter<'_, T, F, A>
-where
- F: FnMut(&mut T) -> bool,
-{
- type Item = T;
-
- fn next(&mut self) -> Option<T> {
- unsafe {
- while self.idx < self.old_len {
- let i = self.idx;
- let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
- self.panic_flag = true;
- let drained = (self.pred)(&mut v[i]);
- self.panic_flag = false;
- // Update the index *after* the predicate is called. If the index
- // is updated prior and the predicate panics, the element at this
- // index would be leaked.
- self.idx += 1;
- if drained {
- self.del += 1;
- return Some(ptr::read(&v[i]));
- } else if self.del > 0 {
- let del = self.del;
- let src: *const T = &v[i];
- let dst: *mut T = &mut v[i - del];
- ptr::copy_nonoverlapping(src, dst, 1);
- }
- }
- None
- }
- }
-
- fn size_hint(&self) -> (usize, Option<usize>) {
- (0, Some(self.old_len - self.idx))
- }
-}
-
-#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
-impl<T, F, A: Allocator> Drop for DrainFilter<'_, T, F, A>
-where
- F: FnMut(&mut T) -> bool,
-{
- fn drop(&mut self) {
- struct BackshiftOnDrop<'a, 'b, T, F, A: Allocator>
- where
- F: FnMut(&mut T) -> bool,
- {
- drain: &'b mut DrainFilter<'a, T, F, A>,
- }
-
- impl<'a, 'b, T, F, A: Allocator> Drop for BackshiftOnDrop<'a, 'b, T, F, A>
- where
- F: FnMut(&mut T) -> bool,
- {
- fn drop(&mut self) {
- unsafe {
- if self.drain.idx < self.drain.old_len && self.drain.del > 0 {
- // This is a pretty messed up state, and there isn't really an
- // obviously right thing to do. We don't want to keep trying
- // to execute `pred`, so we just backshift all the unprocessed
- // elements and tell the vec that they still exist. The backshift
- // is required to prevent a double-drop of the last successfully
- // drained item prior to a panic in the predicate.
- let ptr = self.drain.vec.as_mut_ptr();
- let src = ptr.add(self.drain.idx);
- let dst = src.sub(self.drain.del);
- let tail_len = self.drain.old_len - self.drain.idx;
- src.copy_to(dst, tail_len);
- }
- self.drain.vec.set_len(self.drain.old_len - self.drain.del);
- }
- }
- }
-
- let backshift = BackshiftOnDrop { drain: self };
-
- // Attempt to consume any remaining elements if the filter predicate
- // has not yet panicked. We'll backshift any remaining elements
- // whether we've already panicked or if the consumption here panics.
- if !backshift.drain.panic_flag {
- backshift.drain.for_each(drop);
- }
- }
-}
diff --git a/rust/alloc/vec/extract_if.rs b/rust/alloc/vec/extract_if.rs
new file mode 100644
index 0000000000..f314a51d4d
--- /dev/null
+++ b/rust/alloc/vec/extract_if.rs
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use crate::alloc::{Allocator, Global};
+use core::ptr;
+use core::slice;
+
+use super::Vec;
+
+/// An iterator which uses a closure to determine if an element should be removed.
+///
+/// This struct is created by [`Vec::extract_if`].
+/// See its documentation for more.
+///
+/// # Example
+///
+/// ```
+/// #![feature(extract_if)]
+///
+/// let mut v = vec![0, 1, 2];
+/// let iter: std::vec::ExtractIf<'_, _, _> = v.extract_if(|x| *x % 2 == 0);
+/// ```
+#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
+#[derive(Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct ExtractIf<
+ 'a,
+ T,
+ F,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> where
+ F: FnMut(&mut T) -> bool,
+{
+ pub(super) vec: &'a mut Vec<T, A>,
+ /// The index of the item that will be inspected by the next call to `next`.
+ pub(super) idx: usize,
+ /// The number of items that have been drained (removed) thus far.
+ pub(super) del: usize,
+ /// The original length of `vec` prior to draining.
+ pub(super) old_len: usize,
+ /// The filter test predicate.
+ pub(super) pred: F,
+}
+
+impl<T, F, A: Allocator> ExtractIf<'_, T, F, A>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ /// Returns a reference to the underlying allocator.
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ self.vec.allocator()
+ }
+}
+
+#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
+impl<T, F, A: Allocator> Iterator for ExtractIf<'_, T, F, A>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ unsafe {
+ while self.idx < self.old_len {
+ let i = self.idx;
+ let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
+ let drained = (self.pred)(&mut v[i]);
+ // Update the index *after* the predicate is called. If the index
+ // is updated prior and the predicate panics, the element at this
+ // index would be leaked.
+ self.idx += 1;
+ if drained {
+ self.del += 1;
+ return Some(ptr::read(&v[i]));
+ } else if self.del > 0 {
+ let del = self.del;
+ let src: *const T = &v[i];
+ let dst: *mut T = &mut v[i - del];
+ ptr::copy_nonoverlapping(src, dst, 1);
+ }
+ }
+ None
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(self.old_len - self.idx))
+ }
+}
+
+#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
+impl<T, F, A: Allocator> Drop for ExtractIf<'_, T, F, A>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ fn drop(&mut self) {
+ unsafe {
+ if self.idx < self.old_len && self.del > 0 {
+ // This is a pretty messed up state, and there isn't really an
+ // obviously right thing to do. We don't want to keep trying
+ // to execute `pred`, so we just backshift all the unprocessed
+ // elements and tell the vec that they still exist. The backshift
+ // is required to prevent a double-drop of the last successfully
+ // drained item prior to a panic in the predicate.
+ let ptr = self.vec.as_mut_ptr();
+ let src = ptr.add(self.idx);
+ let dst = src.sub(self.del);
+ let tail_len = self.old_len - self.idx;
+ src.copy_to(dst, tail_len);
+ }
+ self.vec.set_len(self.old_len - self.del);
+ }
+ }
+}
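
Compared to the deleted `DrainFilter`, `ExtractIf` has no `panic_flag` and its `Drop` impl only backshifts; it never keeps running the predicate. Dropping an unexhausted iterator therefore retains the unvisited elements. A sketch on nightly userspace Rust (requires the `extract_if` feature):

    let mut v = vec![1, 2, 3, 4];
    {
        let mut it = v.extract_if(|x| *x % 2 == 0);
        assert_eq!(it.next(), Some(2)); // removes only this element
    } // dropped before exhaustion: 4 is neither tested nor removed
    assert_eq!(v, [1, 3, 4]);
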
diff --git a/rust/alloc/vec/mod.rs b/rust/alloc/vec/mod.rs
index 05c70de022..209a88cfe5 100644
--- a/rust/alloc/vec/mod.rs
+++ b/rust/alloc/vec/mod.rs
@@ -74,10 +74,10 @@ use crate::boxed::Box;
use crate::collections::{TryReserveError, TryReserveErrorKind};
use crate::raw_vec::RawVec;
-#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
-pub use self::drain_filter::DrainFilter;
+#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
+pub use self::extract_if::ExtractIf;
-mod drain_filter;
+mod extract_if;
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_splice", since = "1.21.0")]
@@ -216,7 +216,7 @@ mod spec_extend;
///
/// # Indexing
///
-/// The `Vec` type allows to access values by index, because it implements the
+/// The `Vec` type allows access to values by index, because it implements the
/// [`Index`] trait. An example will be more explicit:
///
/// ```
@@ -618,22 +618,20 @@ impl<T> Vec<T> {
/// Using memory that was allocated elsewhere:
///
/// ```rust
- /// #![feature(allocator_api)]
- ///
- /// use std::alloc::{AllocError, Allocator, Global, Layout};
+ /// use std::alloc::{alloc, Layout};
///
/// fn main() {
/// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
///
/// let vec = unsafe {
- /// let mem = match Global.allocate(layout) {
- /// Ok(mem) => mem.cast::<u32>().as_ptr(),
- /// Err(AllocError) => return,
- /// };
+ /// let mem = alloc(layout).cast::<u32>();
+ /// if mem.is_null() {
+ /// return;
+ /// }
///
/// mem.write(1_000_000);
///
- /// Vec::from_raw_parts_in(mem, 1, 16, Global)
+ /// Vec::from_raw_parts(mem, 1, 16)
/// };
///
/// assert_eq!(vec, &[1_000_000]);
@@ -876,19 +874,22 @@ impl<T, A: Allocator> Vec<T, A> {
/// Using memory that was allocated elsewhere:
///
/// ```rust
- /// use std::alloc::{alloc, Layout};
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::{AllocError, Allocator, Global, Layout};
///
/// fn main() {
/// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
+ ///
/// let vec = unsafe {
- /// let mem = alloc(layout).cast::<u32>();
- /// if mem.is_null() {
- /// return;
- /// }
+ /// let mem = match Global.allocate(layout) {
+ /// Ok(mem) => mem.cast::<u32>().as_ptr(),
+ /// Err(AllocError) => return,
+ /// };
///
/// mem.write(1_000_000);
///
- /// Vec::from_raw_parts(mem, 1, 16)
+ /// Vec::from_raw_parts_in(mem, 1, 16, Global)
/// };
///
/// assert_eq!(vec, &[1_000_000]);
@@ -2507,7 +2508,7 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
let len = self.len();
if new_len > len {
- self.extend_with(new_len - len, ExtendElement(value))
+ self.extend_with(new_len - len, value)
} else {
self.truncate(new_len);
}
@@ -2545,7 +2546,7 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
let len = self.len();
if new_len > len {
- self.try_extend_with(new_len - len, ExtendElement(value))
+ self.try_extend_with(new_len - len, value)
} else {
self.truncate(new_len);
Ok(())
@@ -2684,26 +2685,10 @@ impl<T, A: Allocator, const N: usize> Vec<[T; N], A> {
}
}
-// This code generalizes `extend_with_{element,default}`.
-trait ExtendWith<T> {
- fn next(&mut self) -> T;
- fn last(self) -> T;
-}
-
-struct ExtendElement<T>(T);
-impl<T: Clone> ExtendWith<T> for ExtendElement<T> {
- fn next(&mut self) -> T {
- self.0.clone()
- }
- fn last(self) -> T {
- self.0
- }
-}
-
-impl<T, A: Allocator> Vec<T, A> {
+impl<T: Clone, A: Allocator> Vec<T, A> {
#[cfg(not(no_global_oom_handling))]
- /// Extend the vector by `n` values, using the given generator.
- fn extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) {
+ /// Extend the vector by `n` clones of value.
+ fn extend_with(&mut self, n: usize, value: T) {
self.reserve(n);
unsafe {
@@ -2715,15 +2700,15 @@ impl<T, A: Allocator> Vec<T, A> {
// Write all elements except the last one
for _ in 1..n {
- ptr::write(ptr, value.next());
+ ptr::write(ptr, value.clone());
ptr = ptr.add(1);
- // Increment the length in every step in case next() panics
+ // Increment the length in every step in case clone() panics
local_len.increment_len(1);
}
if n > 0 {
// We can write the last element directly without cloning needlessly
- ptr::write(ptr, value.last());
+ ptr::write(ptr, value);
local_len.increment_len(1);
}
@@ -2731,8 +2716,8 @@ impl<T, A: Allocator> Vec<T, A> {
}
}
- /// Try to extend the vector by `n` values, using the given generator.
- fn try_extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) -> Result<(), TryReserveError> {
+ /// Try to extend the vector by `n` clones of value.
+ fn try_extend_with(&mut self, n: usize, value: T) -> Result<(), TryReserveError> {
self.try_reserve(n)?;
unsafe {
@@ -2744,15 +2729,15 @@ impl<T, A: Allocator> Vec<T, A> {
// Write all elements except the last one
for _ in 1..n {
- ptr::write(ptr, value.next());
+ ptr::write(ptr, value.clone());
ptr = ptr.add(1);
- // Increment the length in every step in case next() panics
+ // Increment the length in every step in case clone() panics
local_len.increment_len(1);
}
if n > 0 {
// We can write the last element directly without cloning needlessly
- ptr::write(ptr, value.last());
+ ptr::write(ptr, value);
local_len.increment_len(1);
}
@@ -3210,6 +3195,12 @@ impl<T, A: Allocator> Vec<T, A> {
/// If the closure returns false, the element will remain in the vector and will not be yielded
/// by the iterator.
///
+ /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating
+ /// or the iteration short-circuits, then the remaining elements will be retained.
+ /// Use [`retain`] with a negated predicate if you do not need the returned iterator.
+ ///
+ /// [`retain`]: Vec::retain
+ ///
/// Using this method is equivalent to the following code:
///
/// ```
@@ -3228,10 +3219,10 @@ impl<T, A: Allocator> Vec<T, A> {
/// # assert_eq!(vec, vec![1, 4, 5]);
/// ```
///
- /// But `drain_filter` is easier to use. `drain_filter` is also more efficient,
+ /// But `extract_if` is easier to use. `extract_if` is also more efficient,
/// because it can backshift the elements of the array in bulk.
///
- /// Note that `drain_filter` also lets you mutate every element in the filter closure,
+ /// Note that `extract_if` also lets you mutate every element in the filter closure,
/// regardless of whether you choose to keep or remove it.
///
/// # Examples
@@ -3239,17 +3230,17 @@ impl<T, A: Allocator> Vec<T, A> {
/// Splitting an array into evens and odds, reusing the original allocation:
///
/// ```
- /// #![feature(drain_filter)]
+ /// #![feature(extract_if)]
/// let mut numbers = vec![1, 2, 3, 4, 5, 6, 8, 9, 11, 13, 14, 15];
///
- /// let evens = numbers.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ /// let evens = numbers.extract_if(|x| *x % 2 == 0).collect::<Vec<_>>();
/// let odds = numbers;
///
/// assert_eq!(evens, vec![2, 4, 6, 8, 14]);
/// assert_eq!(odds, vec![1, 3, 5, 9, 11, 13, 15]);
/// ```
- #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
- pub fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F, A>
+ #[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
+ pub fn extract_if<F>(&mut self, filter: F) -> ExtractIf<'_, T, F, A>
where
F: FnMut(&mut T) -> bool,
{
@@ -3260,7 +3251,7 @@ impl<T, A: Allocator> Vec<T, A> {
self.set_len(0);
}
- DrainFilter { vec: self, idx: 0, del: 0, old_len, pred: filter, panic_flag: false }
+ ExtractIf { vec: self, idx: 0, del: 0, old_len, pred: filter }
}
}
@@ -3272,7 +3263,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// [`copy_from_slice`]: slice::copy_from_slice
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "extend_ref", since = "1.2.0")]
-impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
+impl<'a, T: Copy + 'a, A: Allocator> Extend<&'a T> for Vec<T, A> {
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
self.spec_extend(iter.into_iter())
}
@@ -3290,9 +3281,14 @@ impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
/// Implements comparison of vectors, [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
+impl<T, A1, A2> PartialOrd<Vec<T, A2>> for Vec<T, A1>
+where
+ T: PartialOrd,
+ A1: Allocator,
+ A2: Allocator,
+{
#[inline]
- fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ fn partial_cmp(&self, other: &Vec<T, A2>) -> Option<Ordering> {
PartialOrd::partial_cmp(&**self, &**other)
}
}
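
The relaxed `PartialOrd` impl compares vectors element-wise even when their allocators differ. A sketch on nightly userspace Rust (`allocator_api` feature, using `std::alloc::System` as the second allocator):

    use std::alloc::System;

    let a: Vec<i32> = vec![1, 2, 3];
    let mut b: Vec<i32, System> = Vec::new_in(System);
    b.extend_from_slice(&[1, 2, 4]);
    // Accepted by the new impl even though A1 = Global and A2 = System.
    assert!(a < b);
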
diff --git a/rust/alloc/vec/spec_extend.rs b/rust/alloc/vec/spec_extend.rs
index a6a735201e..ada9195374 100644
--- a/rust/alloc/vec/spec_extend.rs
+++ b/rust/alloc/vec/spec_extend.rs
@@ -77,7 +77,7 @@ impl<T, A: Allocator> TrySpecExtend<T, IntoIter<T>> for Vec<T, A> {
}
#[cfg(not(no_global_oom_handling))]
-impl<'a, T: 'a, I, A: Allocator + 'a> SpecExtend<&'a T, I> for Vec<T, A>
+impl<'a, T: 'a, I, A: Allocator> SpecExtend<&'a T, I> for Vec<T, A>
where
I: Iterator<Item = &'a T>,
T: Clone,
@@ -87,7 +87,7 @@ where
}
}
-impl<'a, T: 'a, I, A: Allocator + 'a> TrySpecExtend<&'a T, I> for Vec<T, A>
+impl<'a, T: 'a, I, A: Allocator> TrySpecExtend<&'a T, I> for Vec<T, A>
where
I: Iterator<Item = &'a T>,
T: Clone,
@@ -98,7 +98,7 @@ where
}
#[cfg(not(no_global_oom_handling))]
-impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+impl<'a, T: 'a, A: Allocator> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
where
T: Copy,
{
@@ -108,7 +108,7 @@ where
}
}
-impl<'a, T: 'a, A: Allocator + 'a> TrySpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+impl<'a, T: 'a, A: Allocator> TrySpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
where
T: Copy,
{
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index c91a3c24f6..85f013ed4c 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -12,6 +12,7 @@
#include <linux/refcount.h>
#include <linux/wait.h>
#include <linux/sched.h>
+#include <linux/workqueue.h>
/* `bindgen` gets confused at certain things. */
const size_t BINDINGS_ARCH_SLAB_MINALIGN = ARCH_SLAB_MINALIGN;
diff --git a/rust/compiler_builtins.rs b/rust/compiler_builtins.rs
index fb8ac3f211..bba2922c6e 100644
--- a/rust/compiler_builtins.rs
+++ b/rust/compiler_builtins.rs
@@ -19,6 +19,7 @@
//! [`compiler_builtins`]: https://github.com/rust-lang/compiler-builtins
//! [`compiler-rt`]: https://compiler-rt.llvm.org/
+#![allow(internal_features)]
#![feature(compiler_builtins)]
#![compiler_builtins]
#![no_builtins]
diff --git a/rust/helpers.c b/rust/helpers.c
index 4c86fe4a7e..70e59efd92 100644
--- a/rust/helpers.c
+++ b/rust/helpers.c
@@ -30,6 +30,7 @@
#include <linux/sched/signal.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
+#include <linux/workqueue.h>
__noreturn void rust_helper_BUG(void)
{
@@ -144,6 +145,18 @@ struct kunit *rust_helper_kunit_get_current_test(void)
}
EXPORT_SYMBOL_GPL(rust_helper_kunit_get_current_test);
+void rust_helper_init_work_with_key(struct work_struct *work, work_func_t func,
+ bool onstack, const char *name,
+ struct lock_class_key *key)
+{
+ __init_work(work, onstack);
+ work->data = (atomic_long_t)WORK_DATA_INIT();
+ lockdep_init_map(&work->lockdep_map, name, key, 0);
+ INIT_LIST_HEAD(&work->entry);
+ work->func = func;
+}
+EXPORT_SYMBOL_GPL(rust_helper_init_work_with_key);
+
/*
* `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
* use it in contexts where Rust expects a `usize` like slice (array) indices.
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 4ebb6f23fc..65be9ae57b 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -35,7 +35,7 @@
//! that you need to write `<-` instead of `:` for fields that you want to initialize in-place.
//!
//! ```rust
-//! # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+//! # #![allow(clippy::disallowed_names)]
//! use kernel::{prelude::*, sync::Mutex, new_mutex};
//! # use core::pin::Pin;
//! #[pin_data]
@@ -55,7 +55,7 @@
//! (or just the stack) to actually initialize a `Foo`:
//!
//! ```rust
-//! # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+//! # #![allow(clippy::disallowed_names)]
//! # use kernel::{prelude::*, sync::Mutex, new_mutex};
//! # use core::pin::Pin;
//! # #[pin_data]
@@ -86,7 +86,7 @@
//! To declare an init macro/function you just return an [`impl PinInit<T, E>`]:
//!
//! ```rust
-//! # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+//! # #![allow(clippy::disallowed_names)]
//! # use kernel::{sync::Mutex, prelude::*, new_mutex, init::PinInit, try_pin_init};
//! #[pin_data]
//! struct DriverData {
@@ -236,7 +236,7 @@ pub mod macros;
/// # Examples
///
/// ```rust
-/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+/// # #![allow(clippy::disallowed_names)]
/// # use kernel::{init, macros::pin_data, pin_init, stack_pin_init, init::*, sync::Mutex, new_mutex};
/// # use core::pin::Pin;
/// #[pin_data]
@@ -288,7 +288,7 @@ macro_rules! stack_pin_init {
/// # Examples
///
/// ```rust,ignore
-/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+/// # #![allow(clippy::disallowed_names)]
/// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex};
/// # use macros::pin_data;
/// # use core::{alloc::AllocError, pin::Pin};
@@ -314,7 +314,7 @@ macro_rules! stack_pin_init {
/// ```
///
/// ```rust,ignore
-/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+/// # #![allow(clippy::disallowed_names)]
/// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex};
/// # use macros::pin_data;
/// # use core::{alloc::AllocError, pin::Pin};
@@ -366,7 +366,7 @@ macro_rules! stack_try_pin_init {
/// The syntax is almost identical to that of a normal `struct` initializer:
///
/// ```rust
-/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+/// # #![allow(clippy::disallowed_names)]
/// # use kernel::{init, pin_init, macros::pin_data, init::*};
/// # use core::pin::Pin;
/// #[pin_data]
@@ -411,7 +411,7 @@ macro_rules! stack_try_pin_init {
/// To create an initializer function, simply declare it like this:
///
/// ```rust
-/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+/// # #![allow(clippy::disallowed_names)]
/// # use kernel::{init, pin_init, prelude::*, init::*};
/// # use core::pin::Pin;
/// # #[pin_data]
@@ -438,7 +438,7 @@ macro_rules! stack_try_pin_init {
/// Users of `Foo` can now create it like this:
///
/// ```rust
-/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+/// # #![allow(clippy::disallowed_names)]
/// # use kernel::{init, pin_init, macros::pin_data, init::*};
/// # use core::pin::Pin;
/// # #[pin_data]
@@ -466,7 +466,7 @@ macro_rules! stack_try_pin_init {
/// They can also easily embed it into their own `struct`s:
///
/// ```rust
-/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+/// # #![allow(clippy::disallowed_names)]
/// # use kernel::{init, pin_init, macros::pin_data, init::*};
/// # use core::pin::Pin;
/// # #[pin_data]
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index e881170023..e6aff80b52 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -16,6 +16,8 @@
#![feature(coerce_unsized)]
#![feature(dispatch_from_dyn)]
#![feature(new_uninit)]
+#![feature(offset_of)]
+#![feature(ptr_metadata)]
#![feature(receiver_trait)]
#![feature(unsize)]
@@ -45,6 +47,7 @@ pub mod str;
pub mod sync;
pub mod task;
pub mod types;
+pub mod workqueue;
#[doc(hidden)]
pub use bindings;
diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
index 8009184bf6..f48926e3e9 100644
--- a/rust/kernel/print.rs
+++ b/rust/kernel/print.rs
@@ -399,6 +399,7 @@ macro_rules! pr_debug (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
+/// [`pr_info!`]: crate::pr_info!
/// [`pr_cont`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_cont
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index 3d496391a9..77cdbcf7bd 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -24,13 +24,13 @@ use crate::{
};
use alloc::boxed::Box;
use core::{
- alloc::AllocError,
+ alloc::{AllocError, Layout},
fmt,
marker::{PhantomData, Unsize},
mem::{ManuallyDrop, MaybeUninit},
ops::{Deref, DerefMut},
pin::Pin,
- ptr::NonNull,
+ ptr::{NonNull, Pointee},
};
use macros::pin_data;
@@ -215,6 +215,48 @@ impl<T: ?Sized> Arc<T> {
}
}
+ /// Convert the [`Arc`] into a raw pointer.
+ ///
+ /// The raw pointer has ownership of the refcount that this Arc object owned.
+ pub fn into_raw(self) -> *const T {
+ let ptr = self.ptr.as_ptr();
+ core::mem::forget(self);
+ // SAFETY: The pointer is valid.
+ unsafe { core::ptr::addr_of!((*ptr).data) }
+ }
+
+ /// Recreates an [`Arc`] instance previously deconstructed via [`Arc::into_raw`].
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must have been returned by a previous call to [`Arc::into_raw`]. Additionally, it
+ /// must not be called more than once for each previous call to [`Arc::into_raw`].
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ let refcount_layout = Layout::new::<bindings::refcount_t>();
+ // SAFETY: The caller guarantees that the pointer is valid.
+ let val_layout = Layout::for_value(unsafe { &*ptr });
+ // SAFETY: We're computing the layout of a real struct that existed when compiling this
+ // binary, so its layout is not so large that it can trigger arithmetic overflow.
+ let val_offset = unsafe { refcount_layout.extend(val_layout).unwrap_unchecked().1 };
+
+ let metadata: <T as Pointee>::Metadata = core::ptr::metadata(ptr);
+ // SAFETY: The metadata of `T` and `ArcInner<T>` is the same because `ArcInner` is a struct
+ // with `T` as its last field.
+ //
+ // This is documented at:
+ // <https://doc.rust-lang.org/std/ptr/trait.Pointee.html>.
+ let metadata: <ArcInner<T> as Pointee>::Metadata =
+ unsafe { core::mem::transmute_copy(&metadata) };
+ // SAFETY: The pointer is in-bounds of an allocation both before and after offsetting the
+ // pointer, since it originates from a previous call to `Arc::into_raw` and is still valid.
+ let ptr = unsafe { (ptr as *mut u8).sub(val_offset) as *mut () };
+ let ptr = core::ptr::from_raw_parts_mut(ptr, metadata);
+
+ // SAFETY: By the safety requirements we know that `ptr` came from `Arc::into_raw`, so the
+ // reference count held then will be owned by the new `Arc` object.
+ unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
+ }
+
/// Returns an [`ArcBorrow`] from the given [`Arc`].
///
/// This is useful when the argument of a function call is an [`ArcBorrow`] (e.g., in a method
@@ -302,7 +344,7 @@ impl<T: ?Sized> Drop for Arc<T> {
// The count reached zero, we must free the memory.
//
// SAFETY: The pointer was initialised from the result of `Box::leak`.
- unsafe { Box::from_raw(self.ptr.as_ptr()) };
+ unsafe { drop(Box::from_raw(self.ptr.as_ptr())) };
}
}
}
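
The new `into_raw`/`from_raw` pair exists so an `Arc` can travel through C code as a plain pointer. A sketch of the intended round trip, using only the API added above:

    use kernel::sync::Arc;

    fn roundtrip(a: Arc<u32>) -> Arc<u32> {
        // Ownership of the refcount moves into the raw pointer here.
        let ptr: *const u32 = Arc::into_raw(a);
        // SAFETY: `ptr` came from `Arc::into_raw` above and is reconstructed
        // exactly once, handing the refcount back to a new `Arc`.
        unsafe { Arc::from_raw(ptr) }
    }
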
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
index ed353399c4..b679b6f6db 100644
--- a/rust/kernel/sync/condvar.rs
+++ b/rust/kernel/sync/condvar.rs
@@ -91,7 +91,6 @@ unsafe impl Sync for CondVar {}
impl CondVar {
/// Constructs a new condvar initialiser.
- #[allow(clippy::new_ret_no_self)]
pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
pin_init!(Self {
_pin: PhantomPinned,
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 70a785f047..f12a684bc9 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -99,7 +99,6 @@ unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}
impl<T, B: Backend> Lock<T, B> {
/// Constructs a new lock initialiser.
- #[allow(clippy::new_ret_no_self)]
pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
pin_init!(Self {
data: UnsafeCell::new(t),
diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs
index 7eda15e5f1..b2299bc7ac 100644
--- a/rust/kernel/task.rs
+++ b/rust/kernel/task.rs
@@ -82,7 +82,7 @@ impl Task {
/// Returns a task reference for the currently executing task/thread.
///
/// The recommended way to get the current task/thread is to use the
- /// [`current`](crate::current) macro because it is safe.
+ /// [`current`] macro because it is safe.
///
/// # Safety
///
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
new file mode 100644
index 0000000000..b67fb1ba16
--- /dev/null
+++ b/rust/kernel/workqueue.rs
@@ -0,0 +1,679 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Work queues.
+//!
+//! This file has two components: The raw work item API, and the safe work item API.
+//!
+//! One pattern that is used in both APIs is the `ID` const generic, which exists to allow a single
+//! type to define multiple `work_struct` fields. This is done by choosing an id for each field,
+//! and using that id to specify which field you wish to use. (The actual value doesn't matter, as
+//! long as you use different values for different fields of the same struct.) Since these IDs are
+//! generic, they are used only at compile-time, so they shouldn't exist in the final binary.
+//!
+//! # The raw API
+//!
+//! The raw API consists of the `RawWorkItem` trait, where the work item needs to provide an
+//! arbitrary function that knows how to enqueue the work item. It should usually not be used
+//! directly, but if you want to, you can use it without using the pieces from the safe API.
+//!
+//! # The safe API
+//!
+//! The safe API is used via the `Work` struct and `WorkItem` traits. Furthermore, it also includes
+//! a trait called `WorkItemPointer`, which is usually not used directly by the user.
+//!
+//! * The `Work` struct is the Rust wrapper for the C `work_struct` type.
+//! * The `WorkItem` trait is implemented for structs that can be enqueued to a workqueue.
+//! * The `WorkItemPointer` trait is implemented for the pointer type that points at something
+//! that implements `WorkItem`.
+//!
+//! ## Example
+//!
+//! This example defines a struct that holds an integer and can be scheduled on the workqueue. When
+//! the struct is executed, it will print the integer. Since there is only one `work_struct` field,
+//! we do not need to specify ids for the fields.
+//!
+//! ```
+//! use kernel::prelude::*;
+//! use kernel::sync::Arc;
+//! use kernel::workqueue::{self, Work, WorkItem};
+//! use kernel::{impl_has_work, new_work};
+//!
+//! #[pin_data]
+//! struct MyStruct {
+//! value: i32,
+//! #[pin]
+//! work: Work<MyStruct>,
+//! }
+//!
+//! impl_has_work! {
+//! impl HasWork<Self> for MyStruct { self.work }
+//! }
+//!
+//! impl MyStruct {
+//! fn new(value: i32) -> Result<Arc<Self>> {
+//! Arc::pin_init(pin_init!(MyStruct {
+//! value,
+//! work <- new_work!("MyStruct::work"),
+//! }))
+//! }
+//! }
+//!
+//! impl WorkItem for MyStruct {
+//! type Pointer = Arc<MyStruct>;
+//!
+//! fn run(this: Arc<MyStruct>) {
+//! pr_info!("The value is: {}", this.value);
+//! }
+//! }
+//!
+//! /// This method will enqueue the struct for execution on the system workqueue, where its value
+//! /// will be printed.
+//! fn print_later(val: Arc<MyStruct>) {
+//! let _ = workqueue::system().enqueue(val);
+//! }
+//! ```
+//!
+//! The following example shows how multiple `work_struct` fields can be used:
+//!
+//! ```
+//! use kernel::prelude::*;
+//! use kernel::sync::Arc;
+//! use kernel::workqueue::{self, Work, WorkItem};
+//! use kernel::{impl_has_work, new_work};
+//!
+//! #[pin_data]
+//! struct MyStruct {
+//! value_1: i32,
+//! value_2: i32,
+//! #[pin]
+//! work_1: Work<MyStruct, 1>,
+//! #[pin]
+//! work_2: Work<MyStruct, 2>,
+//! }
+//!
+//! impl_has_work! {
+//! impl HasWork<Self, 1> for MyStruct { self.work_1 }
+//! impl HasWork<Self, 2> for MyStruct { self.work_2 }
+//! }
+//!
+//! impl MyStruct {
+//! fn new(value_1: i32, value_2: i32) -> Result<Arc<Self>> {
+//! Arc::pin_init(pin_init!(MyStruct {
+//! value_1,
+//! value_2,
+//! work_1 <- new_work!("MyStruct::work_1"),
+//! work_2 <- new_work!("MyStruct::work_2"),
+//! }))
+//! }
+//! }
+//!
+//! impl WorkItem<1> for MyStruct {
+//! type Pointer = Arc<MyStruct>;
+//!
+//! fn run(this: Arc<MyStruct>) {
+//! pr_info!("The value is: {}", this.value_1);
+//! }
+//! }
+//!
+//! impl WorkItem<2> for MyStruct {
+//! type Pointer = Arc<MyStruct>;
+//!
+//! fn run(this: Arc<MyStruct>) {
+//! pr_info!("The second value is: {}", this.value_2);
+//! }
+//! }
+//!
+//! fn print_1_later(val: Arc<MyStruct>) {
+//! let _ = workqueue::system().enqueue::<Arc<MyStruct>, 1>(val);
+//! }
+//!
+//! fn print_2_later(val: Arc<MyStruct>) {
+//! let _ = workqueue::system().enqueue::<Arc<MyStruct>, 2>(val);
+//! }
+//! ```
+//!
+//! C header: [`include/linux/workqueue.h`](../../../../include/linux/workqueue.h)
+
+use crate::{bindings, prelude::*, sync::Arc, sync::LockClassKey, types::Opaque};
+use alloc::alloc::AllocError;
+use alloc::boxed::Box;
+use core::marker::PhantomData;
+use core::pin::Pin;
+
+/// Creates a [`Work`] initialiser with the given name and a newly-created lock class.
+#[macro_export]
+macro_rules! new_work {
+ ($($name:literal)?) => {
+ $crate::workqueue::Work::new($crate::optional_name!($($name)?), $crate::static_lock_class!())
+ };
+}
+
+/// A kernel work queue.
+///
+/// Wraps the kernel's C `struct workqueue_struct`.
+///
+/// It allows work items to be queued to run on thread pools managed by the kernel. Several are
+/// always available, for example, `system`, `system_highpri`, `system_long`, etc.
+#[repr(transparent)]
+pub struct Queue(Opaque<bindings::workqueue_struct>);
+
+// SAFETY: Accesses to workqueues used by [`Queue`] are thread-safe.
+unsafe impl Send for Queue {}
+// SAFETY: Accesses to workqueues used by [`Queue`] are thread-safe.
+unsafe impl Sync for Queue {}
+
+impl Queue {
+ /// Use the provided `struct workqueue_struct` with Rust.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the provided raw pointer is not dangling, that it points at a
+ /// valid workqueue, and that it remains valid until the end of 'a.
+ pub unsafe fn from_raw<'a>(ptr: *const bindings::workqueue_struct) -> &'a Queue {
+ // SAFETY: The `Queue` type is `#[repr(transparent)]`, so the pointer cast is valid. The
+ // caller promises that the pointer is not dangling.
+ unsafe { &*(ptr as *const Queue) }
+ }
+
+ /// Enqueues a work item.
+ ///
+ /// This may fail if the work item is already enqueued in a workqueue.
+ ///
+ /// The work item will be submitted using `WORK_CPU_UNBOUND`.
+ pub fn enqueue<W, const ID: u64>(&self, w: W) -> W::EnqueueOutput
+ where
+ W: RawWorkItem<ID> + Send + 'static,
+ {
+ let queue_ptr = self.0.get();
+
+ // SAFETY: We only return `false` if the `work_struct` is already in a workqueue. The other
+ // `__enqueue` requirements are not relevant since `W` is `Send` and static.
+ //
+ // The call to `bindings::queue_work_on` will dereference the provided raw pointer, which
+ // is ok because `__enqueue` guarantees that the pointer is valid for the duration of this
+ // closure.
+ //
+ // Furthermore, if the C workqueue code accesses the pointer after this call to
+ // `__enqueue`, then the work item was successfully enqueued, and `bindings::queue_work_on`
+ // will have returned true. In this case, `__enqueue` promises that the raw pointer will
+ // stay valid until we call the function pointer in the `work_struct`, so the access is ok.
+ unsafe {
+ w.__enqueue(move |work_ptr| {
+ bindings::queue_work_on(bindings::WORK_CPU_UNBOUND as _, queue_ptr, work_ptr)
+ })
+ }
+ }
+
+ /// Tries to spawn the given function or closure as a work item.
+ ///
+ /// This method can fail because it allocates memory to store the work item.
+ pub fn try_spawn<T: 'static + Send + FnOnce()>(&self, func: T) -> Result<(), AllocError> {
+ let init = pin_init!(ClosureWork {
+ work <- new_work!("Queue::try_spawn"),
+ func: Some(func),
+ });
+
+ self.enqueue(Box::pin_init(init).map_err(|_| AllocError)?);
+ Ok(())
+ }
+}
+
+/// A helper type used in `try_spawn`.
+#[pin_data]
+struct ClosureWork<T> {
+ #[pin]
+ work: Work<ClosureWork<T>>,
+ func: Option<T>,
+}
+
+impl<T> ClosureWork<T> {
+ fn project(self: Pin<&mut Self>) -> &mut Option<T> {
+ // SAFETY: The `func` field is not structurally pinned.
+ unsafe { &mut self.get_unchecked_mut().func }
+ }
+}
+
+impl<T: FnOnce()> WorkItem for ClosureWork<T> {
+ type Pointer = Pin<Box<Self>>;
+
+ fn run(mut this: Pin<Box<Self>>) {
+ if let Some(func) = this.as_mut().project().take() {
+ (func)()
+ }
+ }
+}
+
+/// A raw work item.
+///
+/// This is the low-level trait, designed to be as general as possible.
+///
+/// The `ID` parameter to this trait exists so that a single type can provide multiple
+/// implementations of this trait. For example, if a struct has multiple `work_struct` fields, then
+/// you will implement this trait once for each field, using a different id for each field. The
+/// actual value of the id is not important as long as you use different ids for different fields
+/// of the same struct. (Fields of different structs need not use different ids.)
+///
+/// Note that the id is used only to select the right method to call during compilation. It won't be
+/// part of the final executable.
+///
+/// # Safety
+///
+/// Implementers must ensure that any pointers passed to a `queue_work_on` closure by `__enqueue`
+/// remain valid for the duration specified in the guarantees section of the documentation for
+/// `__enqueue`.
+pub unsafe trait RawWorkItem<const ID: u64> {
+ /// The return type of [`Queue::enqueue`].
+ type EnqueueOutput;
+
+ /// Enqueues this work item on a queue using the provided `queue_work_on` method.
+ ///
+ /// # Guarantees
+ ///
+ /// If this method calls the provided closure, then the raw pointer is guaranteed to point at a
+ /// valid `work_struct` for the duration of the call to the closure. If the closure returns
+ /// true, then it is further guaranteed that the pointer remains valid until someone calls the
+ /// function pointer stored in the `work_struct`.
+ ///
+ /// # Safety
+ ///
+ /// The provided closure may only return `false` if the `work_struct` is already in a workqueue.
+ ///
+ /// If the work item type is annotated with any lifetimes, then you must not call the function
+ /// pointer after any such lifetime expires. (Never calling the function pointer is okay.)
+ ///
+ /// If the work item type is not [`Send`], then the function pointer must be called on the same
+ /// thread as the call to `__enqueue`.
+ unsafe fn __enqueue<F>(self, queue_work_on: F) -> Self::EnqueueOutput
+ where
+ F: FnOnce(*mut bindings::work_struct) -> bool;
+}
+
+/// Defines the method that should be called directly when a work item is executed.
+///
+/// This trait is implemented by `Pin<Box<T>>` and `Arc<T>`, and is mainly intended to be
+/// implemented for smart pointer types. For your own structs, you would implement [`WorkItem`]
+/// instead. The `run` method on this trait will usually just perform the appropriate
+/// `container_of` translation and then call into the `run` method from the [`WorkItem`] trait.
+///
+/// This trait is used when the `work_struct` field is defined using the [`Work`] helper.
+///
+/// # Safety
+///
+/// Implementers must ensure that [`__enqueue`] uses a `work_struct` initialized with the [`run`]
+/// method of this trait as the function pointer.
+///
+/// [`__enqueue`]: RawWorkItem::__enqueue
+/// [`run`]: WorkItemPointer::run
+pub unsafe trait WorkItemPointer<const ID: u64>: RawWorkItem<ID> {
+ /// Run this work item.
+ ///
+ /// # Safety
+ ///
+ /// The provided `work_struct` pointer must originate from a previous call to `__enqueue` where
+ /// the `queue_work_on` closure returned true, and the pointer must still be valid.
+ unsafe extern "C" fn run(ptr: *mut bindings::work_struct);
+}
+
+/// Defines the method that should be called when this work item is executed.
+///
+/// This trait is used when the `work_struct` field is defined using the [`Work`] helper.
+pub trait WorkItem<const ID: u64 = 0> {
+ /// The pointer type that this struct is wrapped in. This will typically be `Arc<Self>` or
+ /// `Pin<Box<Self>>`.
+ type Pointer: WorkItemPointer<ID>;
+
+ /// The method that should be called when this work item is executed.
+ fn run(this: Self::Pointer);
+}
+
+/// Links for a work item.
+///
+/// This struct contains a function pointer to the `run` function from the [`WorkItemPointer`]
+/// trait, and defines the linked list pointers necessary to enqueue a work item in a workqueue.
+///
+/// Wraps the kernel's C `struct work_struct`.
+///
+/// This is a helper type used to associate a `work_struct` with the [`WorkItem`] that uses it.
+#[repr(transparent)]
+pub struct Work<T: ?Sized, const ID: u64 = 0> {
+ work: Opaque<bindings::work_struct>,
+ _inner: PhantomData<T>,
+}
+
+// SAFETY: Kernel work items are usable from any thread.
+//
+// We do not need to constrain `T` since the work item does not actually contain a `T`.
+unsafe impl<T: ?Sized, const ID: u64> Send for Work<T, ID> {}
+// SAFETY: Kernel work items are usable from any thread.
+//
+// We do not need to constrain `T` since the work item does not actually contain a `T`.
+unsafe impl<T: ?Sized, const ID: u64> Sync for Work<T, ID> {}
+
+impl<T: ?Sized, const ID: u64> Work<T, ID> {
+ /// Creates a new instance of [`Work`].
+ #[inline]
+ #[allow(clippy::new_ret_no_self)]
+ pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self>
+ where
+ T: WorkItem<ID>,
+ {
+ // SAFETY: The `WorkItemPointer` implementation promises that `run` can be used as the work
+ // item function.
+ unsafe {
+ kernel::init::pin_init_from_closure(move |slot| {
+ let slot = Self::raw_get(slot);
+ bindings::init_work_with_key(
+ slot,
+ Some(T::Pointer::run),
+ false,
+ name.as_char_ptr(),
+ key.as_ptr(),
+ );
+ Ok(())
+ })
+ }
+ }
+
+ /// Get a pointer to the inner `work_struct`.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must not be dangling and must be properly aligned. (But the memory
+ /// need not be initialized.)
+ #[inline]
+ pub unsafe fn raw_get(ptr: *const Self) -> *mut bindings::work_struct {
+ // SAFETY: The caller promises that the pointer is aligned and not dangling.
+ //
+ // A pointer cast would also be ok due to `#[repr(transparent)]`. We use `addr_of!` so that
+ // the compiler does not complain that the `work` field is unused.
+ unsafe { Opaque::raw_get(core::ptr::addr_of!((*ptr).work)) }
+ }
+}
+
+/// Declares that a type has a [`Work<T, ID>`] field.
+///
+/// The intended way of using this trait is via the [`impl_has_work!`] macro. You can use the macro
+/// like this:
+///
+/// ```no_run
+/// use kernel::impl_has_work;
+/// use kernel::prelude::*;
+/// use kernel::workqueue::Work;
+///
+/// struct MyWorkItem {
+/// work_field: Work<MyWorkItem, 1>,
+/// }
+///
+/// impl_has_work! {
+/// impl HasWork<MyWorkItem, 1> for MyWorkItem { self.work_field }
+/// }
+/// ```
+///
+/// Note that since the `Work` type is annotated with an id, you can have several `work_struct`
+/// fields by using a different id for each one.
+///
+/// # Safety
+///
+/// The [`OFFSET`] constant must be the offset of a field in `Self` of type [`Work<T, ID>`]. The
+/// methods on this trait must have exactly the behavior that the definitions given below have.
+///
+/// [`Work<T, ID>`]: Work
+/// [`impl_has_work!`]: crate::impl_has_work
+/// [`OFFSET`]: HasWork::OFFSET
+pub unsafe trait HasWork<T, const ID: u64 = 0> {
+ /// The offset of the [`Work<T, ID>`] field.
+ ///
+ /// [`Work<T, ID>`]: Work
+ const OFFSET: usize;
+
+ /// Returns the offset of the [`Work<T, ID>`] field.
+ ///
+    /// This method exists because the [`OFFSET`] constant cannot be accessed if the type is not
+    /// [`Sized`].
+ ///
+ /// [`Work<T, ID>`]: Work
+ /// [`OFFSET`]: HasWork::OFFSET
+ #[inline]
+ fn get_work_offset(&self) -> usize {
+ Self::OFFSET
+ }
+
+ /// Returns a pointer to the [`Work<T, ID>`] field.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must point at a valid struct of type `Self`.
+ ///
+ /// [`Work<T, ID>`]: Work
+ #[inline]
+ unsafe fn raw_get_work(ptr: *mut Self) -> *mut Work<T, ID> {
+ // SAFETY: The caller promises that the pointer is valid.
+ unsafe { (ptr as *mut u8).add(Self::OFFSET) as *mut Work<T, ID> }
+ }
+
+ /// Returns a pointer to the struct containing the [`Work<T, ID>`] field.
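+    ///
+    /// This is the inverse of [`raw_get_work`](HasWork::raw_get_work).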
+ ///
+ /// # Safety
+ ///
+ /// The pointer must point at a [`Work<T, ID>`] field in a struct of type `Self`.
+ ///
+ /// [`Work<T, ID>`]: Work
+ #[inline]
+ unsafe fn work_container_of(ptr: *mut Work<T, ID>) -> *mut Self
+ where
+ Self: Sized,
+ {
+ // SAFETY: The caller promises that the pointer points at a field of the right type in the
+ // right kind of struct.
+ unsafe { (ptr as *mut u8).sub(Self::OFFSET) as *mut Self }
+ }
+}
+
+/// Used to safely implement the [`HasWork<T, ID>`] trait.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::impl_has_work;
+/// use kernel::sync::Arc;
+/// use kernel::workqueue::{self, Work};
+///
+/// struct MyStruct {
+/// work_field: Work<MyStruct, 17>,
+/// }
+///
+/// impl_has_work! {
+/// impl HasWork<MyStruct, 17> for MyStruct { self.work_field }
+/// }
+/// ```
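+///
+/// The macro also accepts generic implementations, as used for `ClosureWork` below. A sketch
+/// with a hypothetical generic type:
+///
+/// ```
+/// use kernel::impl_has_work;
+/// use kernel::workqueue::Work;
+///
+/// struct MyGenericStruct<T> {
+///     inner: T,
+///     work_field: Work<MyGenericStruct<T>, 17>,
+/// }
+///
+/// impl_has_work! {
+///     impl<T> HasWork<MyGenericStruct<T>, 17> for MyGenericStruct<T> { self.work_field }
+/// }
+/// ```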
+///
+/// [`HasWork<T, ID>`]: HasWork
+#[macro_export]
+macro_rules! impl_has_work {
+ ($(impl$(<$($implarg:ident),*>)?
+ HasWork<$work_type:ty $(, $id:tt)?>
+ for $self:ident $(<$($selfarg:ident),*>)?
+ { self.$field:ident }
+ )*) => {$(
+ // SAFETY: The implementation of `raw_get_work` only compiles if the field has the right
+ // type.
+ unsafe impl$(<$($implarg),*>)? $crate::workqueue::HasWork<$work_type $(, $id)?> for $self $(<$($selfarg),*>)? {
+ const OFFSET: usize = ::core::mem::offset_of!(Self, $field) as usize;
+
+ #[inline]
+ unsafe fn raw_get_work(ptr: *mut Self) -> *mut $crate::workqueue::Work<$work_type $(, $id)?> {
+ // SAFETY: The caller promises that the pointer is not dangling.
+ unsafe {
+ ::core::ptr::addr_of_mut!((*ptr).$field)
+ }
+ }
+ }
+ )*};
+}
+
+impl_has_work! {
+ impl<T> HasWork<Self> for ClosureWork<T> { self.work }
+}
+
+unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Arc<T>
+where
+ T: WorkItem<ID, Pointer = Self>,
+ T: HasWork<T, ID>,
+{
+ unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
+ // SAFETY: The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
+ let ptr = ptr as *mut Work<T, ID>;
+ // SAFETY: This computes the pointer that `__enqueue` got from `Arc::into_raw`.
+ let ptr = unsafe { T::work_container_of(ptr) };
+ // SAFETY: This pointer comes from `Arc::into_raw` and we've been given back ownership.
+ let arc = unsafe { Arc::from_raw(ptr) };
+
+ T::run(arc)
+ }
+}
+
+unsafe impl<T, const ID: u64> RawWorkItem<ID> for Arc<T>
+where
+ T: WorkItem<ID, Pointer = Self>,
+ T: HasWork<T, ID>,
+{
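+    // An `Arc` may have other references, so the work item may already be queued; in that case
+    // `queue_work_on` returns `false` and ownership of the `Arc` is handed back to the caller.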
+ type EnqueueOutput = Result<(), Self>;
+
+ unsafe fn __enqueue<F>(self, queue_work_on: F) -> Self::EnqueueOutput
+ where
+ F: FnOnce(*mut bindings::work_struct) -> bool,
+ {
+ // Casting between const and mut is not a problem as long as the pointer is a raw pointer.
+ let ptr = Arc::into_raw(self).cast_mut();
+
+ // SAFETY: Pointers into an `Arc` point at a valid value.
+ let work_ptr = unsafe { T::raw_get_work(ptr) };
+ // SAFETY: `raw_get_work` returns a pointer to a valid value.
+ let work_ptr = unsafe { Work::raw_get(work_ptr) };
+
+ if queue_work_on(work_ptr) {
+ Ok(())
+ } else {
+ // SAFETY: The work queue has not taken ownership of the pointer.
+ Err(unsafe { Arc::from_raw(ptr) })
+ }
+ }
+}
+
+unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Pin<Box<T>>
+where
+ T: WorkItem<ID, Pointer = Self>,
+ T: HasWork<T, ID>,
+{
+ unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
+ // SAFETY: The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
+ let ptr = ptr as *mut Work<T, ID>;
+        // SAFETY: This computes the pointer that `__enqueue` got from `Box::into_raw`.
+        let ptr = unsafe { T::work_container_of(ptr) };
+        // SAFETY: This pointer comes from `Box::into_raw` and we've been given back ownership.
+ let boxed = unsafe { Box::from_raw(ptr) };
+ // SAFETY: The box was already pinned when it was enqueued.
+ let pinned = unsafe { Pin::new_unchecked(boxed) };
+
+ T::run(pinned)
+ }
+}
+
+unsafe impl<T, const ID: u64> RawWorkItem<ID> for Pin<Box<T>>
+where
+ T: WorkItem<ID, Pointer = Self>,
+ T: HasWork<T, ID>,
+{
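+    // A pinned box grants exclusive ownership, so the work item cannot already be queued and
+    // enqueueing can never fail.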
+ type EnqueueOutput = ();
+
+ unsafe fn __enqueue<F>(self, queue_work_on: F) -> Self::EnqueueOutput
+ where
+ F: FnOnce(*mut bindings::work_struct) -> bool,
+ {
+        // SAFETY: We're not going to move `self` or any of its fields, so it's okay to temporarily
+ // remove the `Pin` wrapper.
+ let boxed = unsafe { Pin::into_inner_unchecked(self) };
+ let ptr = Box::into_raw(boxed);
+
+ // SAFETY: Pointers into a `Box` point at a valid value.
+ let work_ptr = unsafe { T::raw_get_work(ptr) };
+ // SAFETY: `raw_get_work` returns a pointer to a valid value.
+ let work_ptr = unsafe { Work::raw_get(work_ptr) };
+
+ if !queue_work_on(work_ptr) {
+ // SAFETY: This method requires exclusive ownership of the box, so it cannot be in a
+ // workqueue.
+ unsafe { ::core::hint::unreachable_unchecked() }
+ }
+ }
+}
+
+/// Returns the system work queue (`system_wq`).
+///
+/// It is the one used by `schedule[_delayed]_work[_on]()`. It is multi-CPU and multi-threaded,
+/// and there are users which expect a relatively short queue flush time.
+///
+/// Callers shouldn't queue work items which can run for too long.
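+///
+/// # Examples
+///
+/// A sketch of enqueueing a work item on the system queue; `MyStruct` is a hypothetical type
+/// like the one shown in the [`WorkItem`] documentation:
+///
+/// ```
+/// use kernel::impl_has_work;
+/// use kernel::prelude::*;
+/// use kernel::sync::Arc;
+/// use kernel::workqueue::{self, Work, WorkItem};
+///
+/// #[pin_data]
+/// struct MyStruct {
+///     #[pin]
+///     work: Work<MyStruct>,
+/// }
+///
+/// impl_has_work! {
+///     impl HasWork<Self> for MyStruct { self.work }
+/// }
+///
+/// impl WorkItem for MyStruct {
+///     type Pointer = Arc<MyStruct>;
+///     fn run(_this: Arc<MyStruct>) {}
+/// }
+///
+/// fn enqueue_later(val: Arc<MyStruct>) {
+///     // If `val` is already queued, `enqueue` hands the `Arc` back as an `Err`.
+///     let _ = workqueue::system().enqueue(val);
+/// }
+/// ```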
+pub fn system() -> &'static Queue {
+ // SAFETY: `system_wq` is a C global, always available.
+ unsafe { Queue::from_raw(bindings::system_wq) }
+}
+
+/// Returns the system high-priority work queue (`system_highpri_wq`).
+///
+/// It is similar to the one returned by [`system`] but for work items which require higher
+/// scheduling priority.
+pub fn system_highpri() -> &'static Queue {
+ // SAFETY: `system_highpri_wq` is a C global, always available.
+ unsafe { Queue::from_raw(bindings::system_highpri_wq) }
+}
+
+/// Returns the system work queue for potentially long-running work items (`system_long_wq`).
+///
+/// It is similar to the one returned by [`system`] but may host long-running work items. Queue
+/// flushing might take relatively long.
+pub fn system_long() -> &'static Queue {
+ // SAFETY: `system_long_wq` is a C global, always available.
+ unsafe { Queue::from_raw(bindings::system_long_wq) }
+}
+
+/// Returns the system unbound work queue (`system_unbound_wq`).
+///
+/// Workers are not bound to any specific CPU, not concurrency managed, and all queued work items
+/// are executed immediately as long as the `max_active` limit is not reached and resources are
+/// available.
+pub fn system_unbound() -> &'static Queue {
+ // SAFETY: `system_unbound_wq` is a C global, always available.
+ unsafe { Queue::from_raw(bindings::system_unbound_wq) }
+}
+
+/// Returns the system freezable work queue (`system_freezable_wq`).
+///
+/// It is equivalent to the one returned by [`system`] except that it's freezable.
+///
+/// A freezable workqueue participates in the freeze phase of the system suspend operations. Work
+/// items on the workqueue are drained and no new work item starts execution until thawed.
+pub fn system_freezable() -> &'static Queue {
+ // SAFETY: `system_freezable_wq` is a C global, always available.
+ unsafe { Queue::from_raw(bindings::system_freezable_wq) }
+}
+
+/// Returns the system power-efficient work queue (`system_power_efficient_wq`).
+///
+/// It is inclined towards saving power and is converted to "unbound" variants if the
+/// `workqueue.power_efficient` kernel parameter is specified; otherwise, it is similar to the one
+/// returned by [`system`].
+pub fn system_power_efficient() -> &'static Queue {
+ // SAFETY: `system_power_efficient_wq` is a C global, always available.
+ unsafe { Queue::from_raw(bindings::system_power_efficient_wq) }
+}
+
+/// Returns the system freezable power-efficient work queue (`system_freezable_power_efficient_wq`).
+///
+/// It is similar to the one returned by [`system_power_efficient`] except that it is freezable.
+///
+/// A freezable workqueue participates in the freeze phase of the system suspend operations. Work
+/// items on the workqueue are drained and no new work item starts execution until thawed.
+pub fn system_freezable_power_efficient() -> &'static Queue {
+ // SAFETY: `system_freezable_power_efficient_wq` is a C global, always available.
+ unsafe { Queue::from_raw(bindings::system_freezable_power_efficient_wq) }
+}