author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
commit    34996e42f82bfd60bc2c191e5cae3c6ab233ec6c (patch)
tree      62db60558cbf089714b48daeabca82bf2b20b20e /rust
parent    Adding debian version 6.8.12-1. (diff)
download  linux-34996e42f82bfd60bc2c191e5cae3c6ab233ec6c.tar.xz
          linux-34996e42f82bfd60bc2c191e5cae3c6ab233ec6c.zip

Merging upstream version 6.9.7.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'rust')
-rw-r--r--  rust/Makefile                      |  58
-rw-r--r--  rust/alloc/alloc.rs                |  12
-rw-r--r--  rust/alloc/boxed.rs                |  34
-rw-r--r--  rust/alloc/collections/mod.rs      |   1
-rw-r--r--  rust/alloc/lib.rs                  |   9
-rw-r--r--  rust/alloc/raw_vec.rs              |  77
-rw-r--r--  rust/alloc/vec/into_iter.rs        |  16
-rw-r--r--  rust/alloc/vec/mod.rs              |  81
-rw-r--r--  rust/bindings/bindings_helper.h    |   5
-rw-r--r--  rust/kernel/allocator.rs           |   2
-rw-r--r--  rust/kernel/error.rs               |  10
-rw-r--r--  rust/kernel/init.rs                |  22
-rw-r--r--  rust/kernel/ioctl.rs               |   6
-rw-r--r--  rust/kernel/lib.rs                 |  37
-rw-r--r--  rust/kernel/net/phy.rs             |  24
-rw-r--r--  rust/kernel/str.rs                 | 193
-rw-r--r--  rust/kernel/sync.rs                |   5
-rw-r--r--  rust/kernel/sync/arc.rs            |  30
-rw-r--r--  rust/kernel/sync/condvar.rs        | 110
-rw-r--r--  rust/kernel/sync/lock.rs           |  19
-rw-r--r--  rust/kernel/sync/lock/mutex.rs     |   3
-rw-r--r--  rust/kernel/sync/lock/spinlock.rs  |   5
-rw-r--r--  rust/kernel/sync/locked_by.rs      |   7
-rw-r--r--  rust/kernel/task.rs                |  24
-rw-r--r--  rust/kernel/time.rs                |  20
-rw-r--r--  rust/kernel/types.rs               |  22
-rw-r--r--  rust/kernel/workqueue.rs           |  84
27 files changed, 682 insertions(+), 234 deletions(-)
diff --git a/rust/Makefile b/rust/Makefile
index cd9e5e3fce..86a125c424 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -40,7 +40,7 @@ obj-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated_kunit.o
ifdef CONFIG_RUST
# `$(rust_flags)` is passed in case the user added `--sysroot`.
-rustc_sysroot := $(shell $(RUSTC) $(rust_flags) --print sysroot)
+rustc_sysroot := $(shell MAKEFLAGS= $(RUSTC) $(rust_flags) --print sysroot)
rustc_host_target := $(shell $(RUSTC) --version --verbose | grep -F 'host: ' | cut -d' ' -f2)
RUST_LIB_SRC ?= $(rustc_sysroot)/lib/rustlib/src/rust/library
@@ -100,7 +100,7 @@ rustdoc: rustdoc-core rustdoc-macros rustdoc-compiler_builtins \
-e 's:rust-logo-[0-9a-f]+\.svg:logo.svg:g' \
-e 's:favicon-[0-9a-f]+\.svg:logo.svg:g' \
-e 's:<link rel="alternate icon" type="image/png" href="[/.]+/static\.files/favicon-(16x16|32x32)-[0-9a-f]+\.png">::g' \
- -e 's:<a href="srctree/([^"]+)">:<a href="$(abs_srctree)/\1">:g'
+ -e 's:<a href="srctree/([^"]+)">:<a href="$(realpath $(srctree))/\1">:g'
$(Q)for f in $(rustdoc_output)/static.files/rustdoc-*.css; do \
echo ".logo-container > img { object-fit: contain; }" >> $$f; done
@@ -108,14 +108,14 @@ rustdoc-macros: private rustdoc_host = yes
rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \
--extern proc_macro
rustdoc-macros: $(src)/macros/lib.rs FORCE
- $(call if_changed,rustdoc)
+ +$(call if_changed,rustdoc)
rustdoc-core: private rustc_target_flags = $(core-cfgs)
rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
- $(call if_changed,rustdoc)
+ +$(call if_changed,rustdoc)
rustdoc-compiler_builtins: $(src)/compiler_builtins.rs rustdoc-core FORCE
- $(call if_changed,rustdoc)
+ +$(call if_changed,rustdoc)
# We need to allow `rustdoc::broken_intra_doc_links` because some
# `no_global_oom_handling` functions refer to non-`no_global_oom_handling`
@@ -124,7 +124,7 @@ rustdoc-compiler_builtins: $(src)/compiler_builtins.rs rustdoc-core FORCE
rustdoc-alloc: private rustc_target_flags = $(alloc-cfgs) \
-Arustdoc::broken_intra_doc_links
rustdoc-alloc: $(src)/alloc/lib.rs rustdoc-core rustdoc-compiler_builtins FORCE
- $(call if_changed,rustdoc)
+ +$(call if_changed,rustdoc)
rustdoc-kernel: private rustc_target_flags = --extern alloc \
--extern build_error --extern macros=$(objtree)/$(obj)/libmacros.so \
@@ -132,7 +132,7 @@ rustdoc-kernel: private rustc_target_flags = --extern alloc \
rustdoc-kernel: $(src)/kernel/lib.rs rustdoc-core rustdoc-macros \
rustdoc-compiler_builtins rustdoc-alloc $(obj)/libmacros.so \
$(obj)/bindings.o FORCE
- $(call if_changed,rustdoc)
+ +$(call if_changed,rustdoc)
quiet_cmd_rustc_test_library = RUSTC TL $<
cmd_rustc_test_library = \
@@ -146,18 +146,18 @@ quiet_cmd_rustc_test_library = RUSTC TL $<
--crate-name $(subst rusttest-,,$(subst rusttestlib-,,$@)) $<
rusttestlib-build_error: $(src)/build_error.rs rusttest-prepare FORCE
- $(call if_changed,rustc_test_library)
+ +$(call if_changed,rustc_test_library)
rusttestlib-macros: private rustc_target_flags = --extern proc_macro
rusttestlib-macros: private rustc_test_library_proc = yes
rusttestlib-macros: $(src)/macros/lib.rs rusttest-prepare FORCE
- $(call if_changed,rustc_test_library)
+ +$(call if_changed,rustc_test_library)
rusttestlib-bindings: $(src)/bindings/lib.rs rusttest-prepare FORCE
- $(call if_changed,rustc_test_library)
+ +$(call if_changed,rustc_test_library)
rusttestlib-uapi: $(src)/uapi/lib.rs rusttest-prepare FORCE
- $(call if_changed,rustc_test_library)
+ +$(call if_changed,rustc_test_library)
quiet_cmd_rustdoc_test = RUSTDOC T $<
cmd_rustdoc_test = \
@@ -188,7 +188,7 @@ quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $<
$(src)/kernel/lib.rs $(obj)/kernel.o \
$(objtree)/scripts/rustdoc_test_builder \
$(objtree)/scripts/rustdoc_test_gen FORCE
- $(call if_changed,rustdoc_test_kernel)
+ +$(call if_changed,rustdoc_test_kernel)
# We cannot use `-Zpanic-abort-tests` because some tests are dynamic,
# so for the moment we skip `-Cpanic=abort`.
@@ -253,21 +253,21 @@ quiet_cmd_rustsysroot = RUSTSYSROOT
$(objtree)/$(obj)/test/sysroot/lib/rustlib/$(rustc_host_target)/lib
rusttest-prepare: FORCE
- $(call if_changed,rustsysroot)
+ +$(call if_changed,rustsysroot)
rusttest-macros: private rustc_target_flags = --extern proc_macro
rusttest-macros: private rustdoc_test_target_flags = --crate-type proc-macro
rusttest-macros: $(src)/macros/lib.rs rusttest-prepare FORCE
- $(call if_changed,rustc_test)
- $(call if_changed,rustdoc_test)
+ +$(call if_changed,rustc_test)
+ +$(call if_changed,rustdoc_test)
rusttest-kernel: private rustc_target_flags = --extern alloc \
--extern build_error --extern macros --extern bindings --extern uapi
rusttest-kernel: $(src)/kernel/lib.rs rusttest-prepare \
rusttestlib-build_error rusttestlib-macros rusttestlib-bindings \
rusttestlib-uapi FORCE
- $(call if_changed,rustc_test)
- $(call if_changed,rustc_test_library)
+ +$(call if_changed,rustc_test)
+ +$(call if_changed,rustc_test_library)
ifdef CONFIG_CC_IS_CLANG
bindgen_c_flags = $(c_flags)
@@ -296,6 +296,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
# Derived from `scripts/Makefile.clang`.
BINDGEN_TARGET_x86 := x86_64-linux-gnu
+BINDGEN_TARGET_arm64 := aarch64-linux-gnu
BINDGEN_TARGET := $(BINDGEN_TARGET_$(SRCARCH))
# All warnings are inhibited since GCC builds are very experimental,
@@ -395,7 +396,7 @@ quiet_cmd_rustc_procmacro = $(RUSTC_OR_CLIPPY_QUIET) P $@
# Therefore, to get `libmacros.so` automatically recompiled when the compiler
# version changes, we add `core.o` as a dependency (even if it is not needed).
$(obj)/libmacros.so: $(src)/macros/lib.rs $(obj)/core.o FORCE
- $(call if_changed_dep,rustc_procmacro)
+ +$(call if_changed_dep,rustc_procmacro)
quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L $@
cmd_rustc_library = \
@@ -412,7 +413,7 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L
rust-analyzer:
$(Q)$(srctree)/scripts/generate_rust_analyzer.py \
--cfgs='core=$(core-cfgs)' --cfgs='alloc=$(alloc-cfgs)' \
- $(abs_srctree) $(abs_objtree) \
+ $(realpath $(srctree)) $(realpath $(objtree)) \
$(RUST_LIB_SRC) $(KBUILD_EXTMOD) > \
$(if $(KBUILD_EXTMOD),$(extmod_prefix),$(objtree))/rust-project.json
@@ -433,37 +434,40 @@ $(obj)/core.o: private skip_clippy = 1
$(obj)/core.o: private skip_flags = -Dunreachable_pub
$(obj)/core.o: private rustc_objcopy = $(foreach sym,$(redirect-intrinsics),--redefine-sym $(sym)=__rust$(sym))
$(obj)/core.o: private rustc_target_flags = $(core-cfgs)
-$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs scripts/target.json FORCE
- $(call if_changed_dep,rustc_library)
+$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
+ +$(call if_changed_dep,rustc_library)
+ifneq ($(or $(CONFIG_X86_64),$(CONFIG_LOONGARCH)),)
+$(obj)/core.o: scripts/target.json
+endif
$(obj)/compiler_builtins.o: private rustc_objcopy = -w -W '__*'
$(obj)/compiler_builtins.o: $(src)/compiler_builtins.rs $(obj)/core.o FORCE
- $(call if_changed_dep,rustc_library)
+ +$(call if_changed_dep,rustc_library)
$(obj)/alloc.o: private skip_clippy = 1
$(obj)/alloc.o: private skip_flags = -Dunreachable_pub
$(obj)/alloc.o: private rustc_target_flags = $(alloc-cfgs)
$(obj)/alloc.o: $(src)/alloc/lib.rs $(obj)/compiler_builtins.o FORCE
- $(call if_changed_dep,rustc_library)
+ +$(call if_changed_dep,rustc_library)
$(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE
- $(call if_changed_dep,rustc_library)
+ +$(call if_changed_dep,rustc_library)
$(obj)/bindings.o: $(src)/bindings/lib.rs \
$(obj)/compiler_builtins.o \
$(obj)/bindings/bindings_generated.rs \
$(obj)/bindings/bindings_helpers_generated.rs FORCE
- $(call if_changed_dep,rustc_library)
+ +$(call if_changed_dep,rustc_library)
$(obj)/uapi.o: $(src)/uapi/lib.rs \
$(obj)/compiler_builtins.o \
$(obj)/uapi/uapi_generated.rs FORCE
- $(call if_changed_dep,rustc_library)
+ +$(call if_changed_dep,rustc_library)
$(obj)/kernel.o: private rustc_target_flags = --extern alloc \
--extern build_error --extern macros --extern bindings --extern uapi
$(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/alloc.o $(obj)/build_error.o \
$(obj)/libmacros.so $(obj)/bindings.o $(obj)/uapi.o FORCE
- $(call if_changed_dep,rustc_library)
+ +$(call if_changed_dep,rustc_library)
endif # CONFIG_RUST
diff --git a/rust/alloc/alloc.rs b/rust/alloc/alloc.rs
index 150e13750f..abb791cc23 100644
--- a/rust/alloc/alloc.rs
+++ b/rust/alloc/alloc.rs
@@ -379,13 +379,20 @@ pub const fn handle_alloc_error(layout: Layout) -> ! {
panic!("allocation failed");
}
+ #[inline]
fn rt_error(layout: Layout) -> ! {
unsafe {
__rust_alloc_error_handler(layout.size(), layout.align());
}
}
- unsafe { core::intrinsics::const_eval_select((layout,), ct_error, rt_error) }
+ #[cfg(not(feature = "panic_immediate_abort"))]
+ unsafe {
+ core::intrinsics::const_eval_select((layout,), ct_error, rt_error)
+ }
+
+ #[cfg(feature = "panic_immediate_abort")]
+ ct_error(layout)
}
// For alloc test `std::alloc::handle_alloc_error` can be used directly.
@@ -418,12 +425,14 @@ pub mod __alloc_error_handler {
}
}
+#[cfg(not(no_global_oom_handling))]
/// Specialize clones into pre-allocated, uninitialized memory.
/// Used by `Box::clone` and `Rc`/`Arc::make_mut`.
pub(crate) trait WriteCloneIntoRaw: Sized {
unsafe fn write_clone_into_raw(&self, target: *mut Self);
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Clone> WriteCloneIntoRaw for T {
#[inline]
default unsafe fn write_clone_into_raw(&self, target: *mut Self) {
@@ -433,6 +442,7 @@ impl<T: Clone> WriteCloneIntoRaw for T {
}
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Copy> WriteCloneIntoRaw for T {
#[inline]
unsafe fn write_clone_into_raw(&self, target: *mut Self) {
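For context, `WriteCloneIntoRaw` works through specialization: the generic impl clones into the destination, while the `Copy` impl may use a plain bitwise write. A minimal self-contained sketch of the same pattern, with illustrative names, assuming a nightly toolchain with `feature(specialization)`:

#![feature(specialization)]
#![allow(incomplete_features)]

trait WriteInto: Sized {
    unsafe fn write_into(&self, target: *mut Self);
}

impl<T: Clone> WriteInto for T {
    default unsafe fn write_into(&self, target: *mut Self) {
        // Generic path: clone into the (possibly uninitialized) slot.
        unsafe { target.write(self.clone()) };
    }
}

impl<T: Copy> WriteInto for T {
    unsafe fn write_into(&self, target: *mut Self) {
        // `Copy` path: a bitwise copy; no user-defined code runs.
        unsafe { target.write(*self) };
    }
}

fn main() {
    let mut slot = core::mem::MaybeUninit::<String>::uninit();
    // SAFETY: `slot` is valid for writes and is read exactly once below.
    unsafe { "hi".to_string().write_into(slot.as_mut_ptr()) };
    assert_eq!(unsafe { slot.assume_init() }, "hi");
}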
diff --git a/rust/alloc/boxed.rs b/rust/alloc/boxed.rs
index 9620eba172..c93a22a5c9 100644
--- a/rust/alloc/boxed.rs
+++ b/rust/alloc/boxed.rs
@@ -161,7 +161,7 @@ use core::marker::Tuple;
use core::marker::Unsize;
use core::mem::{self, SizedTypeProperties};
use core::ops::{
- CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
+ CoerceUnsized, Coroutine, CoroutineState, Deref, DerefMut, DispatchFromDyn, Receiver,
};
use core::pin::Pin;
use core::ptr::{self, NonNull, Unique};
@@ -211,7 +211,7 @@ impl<T> Box<T> {
/// ```
/// let five = Box::new(5);
/// ```
- #[cfg(all(not(no_global_oom_handling)))]
+ #[cfg(not(no_global_oom_handling))]
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
@@ -1042,10 +1042,18 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
/// use std::ptr;
///
/// let x = Box::new(String::from("Hello"));
- /// let p = Box::into_raw(x);
+ /// let ptr = Box::into_raw(x);
/// unsafe {
- /// ptr::drop_in_place(p);
- /// dealloc(p as *mut u8, Layout::new::<String>());
+ /// ptr::drop_in_place(ptr);
+ /// dealloc(ptr as *mut u8, Layout::new::<String>());
+ /// }
+ /// ```
+ /// Note: This is equivalent to the following:
+ /// ```
+ /// let x = Box::new(String::from("Hello"));
+ /// let ptr = Box::into_raw(x);
+ /// unsafe {
+ /// drop(Box::from_raw(ptr));
/// }
/// ```
///
@@ -2110,28 +2118,28 @@ impl<T: ?Sized, A: Allocator> AsMut<T> for Box<T, A> {
#[stable(feature = "pin", since = "1.33.0")]
impl<T: ?Sized, A: Allocator> Unpin for Box<T, A> where A: 'static {}
-#[unstable(feature = "generator_trait", issue = "43122")]
-impl<G: ?Sized + Generator<R> + Unpin, R, A: Allocator> Generator<R> for Box<G, A>
+#[unstable(feature = "coroutine_trait", issue = "43122")]
+impl<G: ?Sized + Coroutine<R> + Unpin, R, A: Allocator> Coroutine<R> for Box<G, A>
where
A: 'static,
{
type Yield = G::Yield;
type Return = G::Return;
- fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> CoroutineState<Self::Yield, Self::Return> {
G::resume(Pin::new(&mut *self), arg)
}
}
-#[unstable(feature = "generator_trait", issue = "43122")]
-impl<G: ?Sized + Generator<R>, R, A: Allocator> Generator<R> for Pin<Box<G, A>>
+#[unstable(feature = "coroutine_trait", issue = "43122")]
+impl<G: ?Sized + Coroutine<R>, R, A: Allocator> Coroutine<R> for Pin<Box<G, A>>
where
A: 'static,
{
type Yield = G::Yield;
type Return = G::Return;
- fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> CoroutineState<Self::Yield, Self::Return> {
G::resume((*self).as_mut(), arg)
}
}
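The `Generator` to `Coroutine` rename tracks the upstream Rust renaming of the trait. A rough standalone sketch of what the forwarding impls above enable (nightly-only; feature gates vary by toolchain, and recent nightlies also want a `#[coroutine]` attribute on the closure):

#![feature(coroutines, coroutine_trait)]

use std::ops::{Coroutine, CoroutineState};
use std::pin::Pin;

fn main() {
    // A boxed coroutine that yields once and then completes; resuming it
    // goes through impls like the `Coroutine for Pin<Box<G, A>>` one above.
    let mut co: Pin<Box<dyn Coroutine<Yield = u32, Return = &'static str>>> =
        Box::pin(|| {
            yield 1;
            "done"
        });
    assert!(matches!(co.as_mut().resume(()), CoroutineState::Yielded(1)));
    assert!(matches!(co.as_mut().resume(()), CoroutineState::Complete("done")));
}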
@@ -2448,4 +2456,8 @@ impl<T: core::error::Error> core::error::Error for Box<T> {
fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
core::error::Error::source(&**self)
}
+
+ fn provide<'b>(&'b self, request: &mut core::error::Request<'b>) {
+ core::error::Error::provide(&**self, request);
+ }
}
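The new `provide` forwarding lets type-based context queries pass through a `Box`. A small sketch of what that enables, assuming the nightly `error_generic_member_access` feature (names are illustrative):

#![feature(error_generic_member_access)]

use std::error::{request_ref, Error, Request};
use std::fmt;

#[derive(Debug)]
struct MyError {
    note: String,
}

impl fmt::Display for MyError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("my error")
    }
}

impl Error for MyError {
    fn provide<'a>(&'a self, request: &mut Request<'a>) {
        // Expose extra context that callers can look up by type.
        request.provide_ref::<String>(&self.note);
    }
}

fn main() {
    let boxed: Box<MyError> = Box::new(MyError { note: "context".into() });
    // `&Box<MyError>` coerces to `&dyn Error` via the `Box<T>` impl, so the
    // forwarding `provide` added above is what answers this query.
    let err: &dyn Error = &boxed;
    assert_eq!(request_ref::<String>(err), Some(&"context".to_string()));
}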
diff --git a/rust/alloc/collections/mod.rs b/rust/alloc/collections/mod.rs
index 2506065d15..00ffb3b973 100644
--- a/rust/alloc/collections/mod.rs
+++ b/rust/alloc/collections/mod.rs
@@ -150,6 +150,7 @@ impl Display for TryReserveError {
/// An intermediate trait for specialization of `Extend`.
#[doc(hidden)]
+#[cfg(not(no_global_oom_handling))]
trait SpecExtend<I: IntoIterator> {
/// Extends `self` with the contents of the given iterator.
fn spec_extend(&mut self, iter: I);
diff --git a/rust/alloc/lib.rs b/rust/alloc/lib.rs
index 9c7ea73da1..36f79c0755 100644
--- a/rust/alloc/lib.rs
+++ b/rust/alloc/lib.rs
@@ -80,6 +80,8 @@
not(no_sync),
target_has_atomic = "ptr"
))]
+#![doc(rust_logo)]
+#![feature(rustdoc_internals)]
#![no_std]
#![needs_allocator]
// Lints:
@@ -115,7 +117,6 @@
#![feature(const_eval_select)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_maybe_uninit_write)]
-#![feature(const_maybe_uninit_zeroed)]
#![feature(const_pin)]
#![feature(const_refs_to_cell)]
#![feature(const_size_of_val)]
@@ -141,7 +142,6 @@
#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_uninit_array_transpose)]
#![feature(pattern)]
-#![feature(pointer_byte_offsets)]
#![feature(ptr_internals)]
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
@@ -156,6 +156,7 @@
#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
+#![feature(trusted_fused)]
#![feature(trusted_len)]
#![feature(trusted_random_access)]
#![feature(try_trait_v2)]
@@ -168,7 +169,7 @@
//
// Language features:
// tidy-alphabetical-start
-#![cfg_attr(not(test), feature(generator_trait))]
+#![cfg_attr(not(test), feature(coroutine_trait))]
#![cfg_attr(test, feature(panic_update_hook))]
#![cfg_attr(test, feature(test))]
#![feature(allocator_internals)]
@@ -276,7 +277,7 @@ pub(crate) mod test_helpers {
/// seed not being the same for every RNG invocation too.
pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
use std::hash::{BuildHasher, Hash, Hasher};
- let mut hasher = std::collections::hash_map::RandomState::new().build_hasher();
+ let mut hasher = std::hash::RandomState::new().build_hasher();
std::panic::Location::caller().hash(&mut hasher);
let hc64 = hasher.finish();
let seed_vec =
diff --git a/rust/alloc/raw_vec.rs b/rust/alloc/raw_vec.rs
index a7425582a3..98b6abf30a 100644
--- a/rust/alloc/raw_vec.rs
+++ b/rust/alloc/raw_vec.rs
@@ -27,6 +27,16 @@ enum AllocInit {
Zeroed,
}
+#[repr(transparent)]
+#[cfg_attr(target_pointer_width = "16", rustc_layout_scalar_valid_range_end(0x7fff))]
+#[cfg_attr(target_pointer_width = "32", rustc_layout_scalar_valid_range_end(0x7fff_ffff))]
+#[cfg_attr(target_pointer_width = "64", rustc_layout_scalar_valid_range_end(0x7fff_ffff_ffff_ffff))]
+struct Cap(usize);
+
+impl Cap {
+ const ZERO: Cap = unsafe { Cap(0) };
+}
+
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
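The `rustc_layout_scalar_valid_range_end` attributes make `Cap` a `usize` whose valid values stop at `isize::MAX`: the capacity invariant is encoded in the type, and the excluded range gives the compiler a niche, the same mechanism behind `NonZeroUsize`. A tiny stable-Rust illustration of that layout effect:

use core::mem::size_of;
use core::num::NonZeroUsize;

fn main() {
    // Because `NonZeroUsize` declares 0 invalid, `Option` can reuse that bit
    // pattern for `None` instead of adding a separate discriminant word.
    assert_eq!(size_of::<Option<NonZeroUsize>>(), size_of::<usize>());
}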
@@ -52,7 +62,12 @@ enum AllocInit {
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
ptr: Unique<T>,
- cap: usize,
+ /// Never used for ZSTs; it's `capacity()`'s responsibility to return usize::MAX in that case.
+ ///
+ /// # Safety
+ ///
+ /// `cap` must be in the `0..=isize::MAX` range.
+ cap: Cap,
alloc: A,
}
@@ -121,7 +136,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// the returned `RawVec`.
pub const fn new_in(alloc: A) -> Self {
// `cap: 0` means "unallocated". zero-sized types are ignored.
- Self { ptr: Unique::dangling(), cap: 0, alloc }
+ Self { ptr: Unique::dangling(), cap: Cap::ZERO, alloc }
}
/// Like `with_capacity`, but parameterized over the choice of
@@ -203,7 +218,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// here should change to `ptr.len() / mem::size_of::<T>()`.
Self {
ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
- cap: capacity,
+ cap: unsafe { Cap(capacity) },
alloc,
}
}
@@ -228,7 +243,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// here should change to `ptr.len() / mem::size_of::<T>()`.
Ok(Self {
ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
- cap: capacity,
+ cap: unsafe { Cap(capacity) },
alloc,
})
}
@@ -240,12 +255,13 @@ impl<T, A: Allocator> RawVec<T, A> {
/// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
/// `capacity`.
/// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
- /// systems). ZST vectors may have a capacity up to `usize::MAX`.
+ /// systems). For ZSTs capacity is ignored.
/// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
/// guaranteed.
#[inline]
pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
- Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc }
+ let cap = if T::IS_ZST { Cap::ZERO } else { unsafe { Cap(capacity) } };
+ Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc }
}
/// Gets a raw pointer to the start of the allocation. Note that this is
@@ -261,7 +277,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
pub fn capacity(&self) -> usize {
- if T::IS_ZST { usize::MAX } else { self.cap }
+ if T::IS_ZST { usize::MAX } else { self.cap.0 }
}
/// Returns a shared reference to the allocator backing this `RawVec`.
@@ -270,7 +286,7 @@ impl<T, A: Allocator> RawVec<T, A> {
}
fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
- if T::IS_ZST || self.cap == 0 {
+ if T::IS_ZST || self.cap.0 == 0 {
None
} else {
// We could use Layout::array here which ensures the absence of isize and usize overflows
@@ -280,7 +296,7 @@ impl<T, A: Allocator> RawVec<T, A> {
let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
unsafe {
let align = mem::align_of::<T>();
- let size = mem::size_of::<T>().unchecked_mul(self.cap);
+ let size = mem::size_of::<T>().unchecked_mul(self.cap.0);
let layout = Layout::from_size_align_unchecked(size, align);
Some((self.ptr.cast().into(), layout))
}
@@ -338,10 +354,13 @@ impl<T, A: Allocator> RawVec<T, A> {
/// The same as `reserve`, but returns on errors instead of panicking or aborting.
pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional) {
- self.grow_amortized(len, additional)
- } else {
- Ok(())
+ self.grow_amortized(len, additional)?;
}
+ unsafe {
+ // Inform the optimizer that the reservation has succeeded or wasn't needed
+ core::intrinsics::assume(!self.needs_to_grow(len, additional));
+ }
+ Ok(())
}
/// The same as `reserve_for_push`, but returns on errors instead of panicking or aborting.
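The `assume` intrinsic used above is the compiler-internal form of a pattern that can also be written with stable hints: after a successful reservation, tell the optimizer the capacity check cannot fail so redundant branches drop out. A rough stable-Rust analogue, assuming Rust 1.81+ for `core::hint::assert_unchecked`:

fn reserve_or_fail(v: &mut Vec<u8>, additional: usize) -> Result<(), ()> {
    v.try_reserve(additional).map_err(|_| ())?;
    // SAFETY: `try_reserve` succeeded, so the spare capacity really exists;
    // asserting something false here would be undefined behavior.
    unsafe { core::hint::assert_unchecked(v.capacity() - v.len() >= additional) };
    Ok(())
}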
@@ -378,7 +397,14 @@ impl<T, A: Allocator> RawVec<T, A> {
len: usize,
additional: usize,
) -> Result<(), TryReserveError> {
- if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) }
+ if self.needs_to_grow(len, additional) {
+ self.grow_exact(len, additional)?;
+ }
+ unsafe {
+ // Inform the optimizer that the reservation has succeeded or wasn't needed
+ core::intrinsics::assume(!self.needs_to_grow(len, additional));
+ }
+ Ok(())
}
/// Shrinks the buffer down to the specified capacity. If the given amount
@@ -404,12 +430,15 @@ impl<T, A: Allocator> RawVec<T, A> {
additional > self.capacity().wrapping_sub(len)
}
- fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
+ /// # Safety:
+ ///
+ /// `cap` must not exceed `isize::MAX`.
+ unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
// Allocators currently return a `NonNull<[u8]>` whose length matches
// the size requested. If that ever changes, the capacity here should
// change to `ptr.len() / mem::size_of::<T>()`.
self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) };
- self.cap = cap;
+ self.cap = unsafe { Cap(cap) };
}
// This method is usually instantiated many times. So we want it to be as
@@ -434,14 +463,15 @@ impl<T, A: Allocator> RawVec<T, A> {
// This guarantees exponential growth. The doubling cannot overflow
// because `cap <= isize::MAX` and the type of `cap` is `usize`.
- let cap = cmp::max(self.cap * 2, required_cap);
+ let cap = cmp::max(self.cap.0 * 2, required_cap);
let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
let new_layout = Layout::array::<T>(cap);
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
- self.set_ptr_and_cap(ptr, cap);
+ // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
+ unsafe { self.set_ptr_and_cap(ptr, cap) };
Ok(())
}
@@ -460,7 +490,10 @@ impl<T, A: Allocator> RawVec<T, A> {
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
- self.set_ptr_and_cap(ptr, cap);
+ // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
+ unsafe {
+ self.set_ptr_and_cap(ptr, cap);
+ }
Ok(())
}
@@ -478,7 +511,7 @@ impl<T, A: Allocator> RawVec<T, A> {
if cap == 0 {
unsafe { self.alloc.deallocate(ptr, layout) };
self.ptr = Unique::dangling();
- self.cap = 0;
+ self.cap = Cap::ZERO;
} else {
let ptr = unsafe {
// `Layout::array` cannot overflow here because it would have
@@ -489,7 +522,10 @@ impl<T, A: Allocator> RawVec<T, A> {
.shrink(ptr, layout, new_layout)
.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
};
- self.set_ptr_and_cap(ptr, cap);
+ // SAFETY: if the allocation is valid, then the capacity is too
+ unsafe {
+ self.set_ptr_and_cap(ptr, cap);
+ }
}
Ok(())
}
@@ -569,6 +605,7 @@ fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
fn capacity_overflow() -> ! {
panic!("capacity overflow");
}
diff --git a/rust/alloc/vec/into_iter.rs b/rust/alloc/vec/into_iter.rs
index aac0ec16ae..136bfe94af 100644
--- a/rust/alloc/vec/into_iter.rs
+++ b/rust/alloc/vec/into_iter.rs
@@ -9,7 +9,8 @@ use crate::raw_vec::RawVec;
use core::array;
use core::fmt;
use core::iter::{
- FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
+ FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen,
+ TrustedRandomAccessNoCoerce,
};
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
@@ -287,9 +288,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
// Also note the implementation of `Self: TrustedRandomAccess` requires
// that `T: Copy` so reading elements from the buffer doesn't invalidate
// them for `Drop`.
- unsafe {
- if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
- }
+ unsafe { if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) } }
}
}
@@ -341,6 +340,10 @@ impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
+#[doc(hidden)]
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<T, A: Allocator> TrustedFused for IntoIter<T, A> {}
+
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
@@ -425,7 +428,10 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> {
// also refer to the vec::in_place_collect module documentation to get an overview
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
-unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {}
+unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {
+ const EXPAND_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
+ const MERGE_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
+}
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
diff --git a/rust/alloc/vec/mod.rs b/rust/alloc/vec/mod.rs
index 41ca71805e..220fb9d6f4 100644
--- a/rust/alloc/vec/mod.rs
+++ b/rust/alloc/vec/mod.rs
@@ -105,6 +105,7 @@ mod into_iter;
#[cfg(not(no_global_oom_handling))]
use self::is_zero::IsZero;
+#[cfg(not(no_global_oom_handling))]
mod is_zero;
#[cfg(not(no_global_oom_handling))]
@@ -123,7 +124,7 @@ use self::set_len_on_drop::SetLenOnDrop;
mod set_len_on_drop;
#[cfg(not(no_global_oom_handling))]
-use self::in_place_drop::{InPlaceDrop, InPlaceDstBufDrop};
+use self::in_place_drop::{InPlaceDrop, InPlaceDstDataSrcBufDrop};
#[cfg(not(no_global_oom_handling))]
mod in_place_drop;
@@ -1376,7 +1377,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// [`as_mut_ptr`]: Vec::as_mut_ptr
/// [`as_ptr`]: Vec::as_ptr
#[stable(feature = "vec_as_ptr", since = "1.37.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
#[inline]
pub fn as_ptr(&self) -> *const T {
// We shadow the slice method of the same name to avoid going through
@@ -1436,7 +1437,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// [`as_mut_ptr`]: Vec::as_mut_ptr
/// [`as_ptr`]: Vec::as_ptr
#[stable(feature = "vec_as_ptr", since = "1.37.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut T {
// We shadow the slice method of the same name to avoid going through
@@ -1565,7 +1566,8 @@ impl<T, A: Allocator> Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap_remove(&mut self, index: usize) -> T {
#[cold]
- #[inline(never)]
+ #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+ #[track_caller]
fn assert_failed(index: usize, len: usize) -> ! {
panic!("swap_remove index (is {index}) should be < len (is {len})");
}
@@ -1606,7 +1608,8 @@ impl<T, A: Allocator> Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn insert(&mut self, index: usize, element: T) {
#[cold]
- #[inline(never)]
+ #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+ #[track_caller]
fn assert_failed(index: usize, len: usize) -> ! {
panic!("insertion index (is {index}) should be <= len (is {len})");
}
@@ -1667,7 +1670,7 @@ impl<T, A: Allocator> Vec<T, A> {
#[track_caller]
pub fn remove(&mut self, index: usize) -> T {
#[cold]
- #[inline(never)]
+ #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
#[track_caller]
fn assert_failed(index: usize, len: usize) -> ! {
panic!("removal index (is {index}) should be < len (is {len})");
@@ -1891,7 +1894,32 @@ impl<T, A: Allocator> Vec<T, A> {
return;
}
- /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
+ // Check if we ever want to remove anything.
+ // This allows us to use copy_nonoverlapping in the next cycle.
+ // And avoids any memory writes if we don't need to remove anything.
+ let mut first_duplicate_idx: usize = 1;
+ let start = self.as_mut_ptr();
+ while first_duplicate_idx != len {
+ let found_duplicate = unsafe {
+ // SAFETY: `first_duplicate_idx` is always in the range [1..len).
+ // Note that we start iteration from 1 so we never overflow.
+ let prev = start.add(first_duplicate_idx.wrapping_sub(1));
+ let current = start.add(first_duplicate_idx);
+ // We explicitly say in docs that references are reversed.
+ same_bucket(&mut *current, &mut *prev)
+ };
+ if found_duplicate {
+ break;
+ }
+ first_duplicate_idx += 1;
+ }
+ // Don't need to remove anything.
+ // We cannot get bigger than len.
+ if first_duplicate_idx == len {
+ return;
+ }
+
+ /* INVARIANT: vec.len() > read > write > write-1 >= 0 */
struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
/* Offset of the element we want to check if it is duplicate */
read: usize,
@@ -1937,31 +1965,39 @@ impl<T, A: Allocator> Vec<T, A> {
}
}
- let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
- let ptr = gap.vec.as_mut_ptr();
-
/* Drop items while going through Vec, it should be more efficient than
* doing slice partition_dedup + truncate */
+ // Construct gap first and then drop item to avoid memory corruption if `T::drop` panics.
+ let mut gap =
+ FillGapOnDrop { read: first_duplicate_idx + 1, write: first_duplicate_idx, vec: self };
+ unsafe {
+ // SAFETY: we checked above that `first_duplicate_idx` is in bounds.
+ // If the drop panics, `gap` will remove this item without dropping it again.
+ ptr::drop_in_place(start.add(first_duplicate_idx));
+ }
+
/* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
* are always in-bounds and read_ptr never aliases prev_ptr */
unsafe {
while gap.read < len {
- let read_ptr = ptr.add(gap.read);
- let prev_ptr = ptr.add(gap.write.wrapping_sub(1));
+ let read_ptr = start.add(gap.read);
+ let prev_ptr = start.add(gap.write.wrapping_sub(1));
- if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
+ // We explicitly say in docs that references are reversed.
+ let found_duplicate = same_bucket(&mut *read_ptr, &mut *prev_ptr);
+ if found_duplicate {
// Increase `gap.read` now since the drop may panic.
gap.read += 1;
/* We have found duplicate, drop it in-place */
ptr::drop_in_place(read_ptr);
} else {
- let write_ptr = ptr.add(gap.write);
+ let write_ptr = start.add(gap.write);
- /* Because `read_ptr` can be equal to `write_ptr`, we either
- * have to use `copy` or conditional `copy_nonoverlapping`.
- * Looks like the first option is faster. */
- ptr::copy(read_ptr, write_ptr, 1);
+ /* read_ptr cannot be equal to write_ptr because at this point
* we are guaranteed to have skipped at least one element (before the loop starts).
+ */
+ ptr::copy_nonoverlapping(read_ptr, write_ptr, 1);
/* We have filled that place, so go further */
gap.write += 1;
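For reference, the observable behavior this rewrite preserves, including the argument order the comments describe as reversed (candidate element first, last retained element second):

fn main() {
    let mut v = vec![1, 1, 2, 3, 3, 3, 4];
    // `same_bucket(a, b)` receives `a` = the candidate element and
    // `b` = the most recently retained one; returning true drops `a`.
    v.dedup_by(|a, b| a == b);
    assert_eq!(v, [1, 2, 3, 4]);
}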
@@ -2097,6 +2133,7 @@ impl<T, A: Allocator> Vec<T, A> {
} else {
unsafe {
self.len -= 1;
+ core::intrinsics::assume(self.len < self.capacity());
Some(ptr::read(self.as_ptr().add(self.len())))
}
}
@@ -2299,7 +2336,8 @@ impl<T, A: Allocator> Vec<T, A> {
A: Clone,
{
#[cold]
- #[inline(never)]
+ #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+ #[track_caller]
fn assert_failed(at: usize, len: usize) -> ! {
panic!("`at` split index (is {at}) should be <= len (is {len})");
}
@@ -2840,6 +2878,7 @@ pub fn from_elem_in<T: Clone, A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<
<T as SpecFromElem>::from_elem(elem, n, alloc)
}
+#[cfg(not(no_global_oom_handling))]
trait ExtendFromWithinSpec {
/// # Safety
///
@@ -2848,6 +2887,7 @@ trait ExtendFromWithinSpec {
unsafe fn spec_extend_from_within(&mut self, src: Range<usize>);
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
default unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
// SAFETY:
@@ -2867,6 +2907,7 @@ impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
}
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
let count = src.len();
@@ -2947,7 +2988,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
/// ```
/// use std::hash::BuildHasher;
///
-/// let b = std::collections::hash_map::RandomState::new();
+/// let b = std::hash::RandomState::new();
/// let v: Vec<u8> = vec![0xa8, 0x3c, 0x09];
/// let s: &[u8] = &[0xa8, 0x3c, 0x09];
/// assert_eq!(b.hash_one(v), b.hash_one(s));
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index c0cb4b05b9..65b98831b9 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -9,12 +9,13 @@
#include <kunit/test.h>
#include <linux/errname.h>
#include <linux/ethtool.h>
+#include <linux/jiffies.h>
#include <linux/mdio.h>
#include <linux/phy.h>
-#include <linux/slab.h>
#include <linux/refcount.h>
-#include <linux/wait.h>
#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
#include <linux/workqueue.h>
/* `bindgen` gets confused at certain things. */
diff --git a/rust/kernel/allocator.rs b/rust/kernel/allocator.rs
index 4b057e8373..01ad139e19 100644
--- a/rust/kernel/allocator.rs
+++ b/rust/kernel/allocator.rs
@@ -35,7 +35,7 @@ unsafe fn krealloc_aligned(ptr: *mut u8, new_layout: Layout, flags: bindings::gf
// - `ptr` is either null or a pointer returned from a previous `k{re}alloc()` by the
// function safety requirement.
// - `size` is greater than 0 since it's either a `layout.size()` (which cannot be zero
- // according to the function safety requirement) or a result from `next_power_of_two()`.
+ // according to the function safety requirement) or a result from `next_power_of_two()`.
unsafe { bindings::krealloc(ptr as *const core::ffi::c_void, size, flags) as *mut u8 }
}
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index 4f0c1edd63..4786d3ee1e 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -264,13 +264,9 @@ pub fn to_result(err: core::ffi::c_int) -> Result {
/// pdev: &mut PlatformDevice,
/// index: u32,
/// ) -> Result<*mut core::ffi::c_void> {
-/// // SAFETY: FFI call.
-/// unsafe {
-/// from_err_ptr(bindings::devm_platform_ioremap_resource(
-/// pdev.to_ptr(),
-/// index,
-/// ))
-/// }
+/// // SAFETY: `pdev` points to a valid platform device. There are no safety requirements
+/// // on `index`.
+/// from_err_ptr(unsafe { bindings::devm_platform_ioremap_resource(pdev.to_ptr(), index) })
/// }
/// ```
// TODO: Remove `dead_code` marker once an in-kernel client is available.
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index cf9575f156..09004b56fb 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -36,7 +36,7 @@
//!
//! ```rust
//! # #![allow(clippy::disallowed_names)]
-//! use kernel::{prelude::*, sync::Mutex, new_mutex};
+//! use kernel::sync::{new_mutex, Mutex};
//! # use core::pin::Pin;
//! #[pin_data]
//! struct Foo {
@@ -56,7 +56,7 @@
//!
//! ```rust
//! # #![allow(clippy::disallowed_names)]
-//! # use kernel::{prelude::*, sync::Mutex, new_mutex};
+//! # use kernel::sync::{new_mutex, Mutex};
//! # use core::pin::Pin;
//! # #[pin_data]
//! # struct Foo {
@@ -79,7 +79,7 @@
//! above method only works for types where you can access the fields.
//!
//! ```rust
-//! # use kernel::{new_mutex, sync::{Arc, Mutex}};
+//! # use kernel::sync::{new_mutex, Arc, Mutex};
//! let mtx: Result<Arc<Mutex<usize>>> = Arc::pin_init(new_mutex!(42, "example::mtx"));
//! ```
//!
@@ -751,10 +751,10 @@ macro_rules! try_init {
///
/// # Safety
///
-/// When implementing this type you will need to take great care. Also there are probably very few
+/// When implementing this trait you will need to take great care. Also there are probably very few
/// cases where a manual implementation is necessary. Use [`pin_init_from_closure`] where possible.
///
-/// The [`PinInit::__pinned_init`] function
+/// The [`PinInit::__pinned_init`] function:
/// - returns `Ok(())` if it initialized every field of `slot`,
/// - returns `Err(err)` if it encountered an error and then cleaned `slot`, this means:
/// - `slot` can be deallocated without UB occurring,
@@ -861,10 +861,10 @@ where
///
/// # Safety
///
-/// When implementing this type you will need to take great care. Also there are probably very few
+/// When implementing this trait you will need to take great care. Also there are probably very few
/// cases where a manual implementation is necessary. Use [`init_from_closure`] where possible.
///
-/// The [`Init::__init`] function
+/// The [`Init::__init`] function:
/// - returns `Ok(())` if it initialized every field of `slot`,
/// - returns `Err(err)` if it encountered an error and then cleaned `slot`, this means:
/// - `slot` can be deallocated without UB occurring,
@@ -1013,7 +1013,7 @@ pub fn uninit<T, E>() -> impl Init<MaybeUninit<T>, E> {
///
/// ```rust
/// use kernel::{error::Error, init::init_array_from_fn};
-/// let array: Box<[usize; 1_000]>= Box::init::<Error>(init_array_from_fn(|i| i)).unwrap();
+/// let array: Box<[usize; 1_000]> = Box::init::<Error>(init_array_from_fn(|i| i)).unwrap();
/// assert_eq!(array.len(), 1_000);
/// ```
pub fn init_array_from_fn<I, const N: usize, T, E>(
@@ -1027,7 +1027,7 @@ where
// Counts the number of initialized elements and when dropped drops that many elements from
// `slot`.
let mut init_count = ScopeGuard::new_with_data(0, |i| {
- // We now free every element that has been initialized before:
+ // We now free every element that has been initialized before.
// SAFETY: The loop initialized exactly the values from 0..i and since we
// return `Err` below, the caller will consider the memory at `slot` as
// uninitialized.
@@ -1056,7 +1056,7 @@ where
///
/// ```rust
/// use kernel::{sync::{Arc, Mutex}, init::pin_init_array_from_fn, new_mutex};
-/// let array: Arc<[Mutex<usize>; 1_000]>=
+/// let array: Arc<[Mutex<usize>; 1_000]> =
/// Arc::pin_init(pin_init_array_from_fn(|i| new_mutex!(i))).unwrap();
/// assert_eq!(array.len(), 1_000);
/// ```
@@ -1071,7 +1071,7 @@ where
// Counts the number of initialized elements and when dropped drops that many elements from
// `slot`.
let mut init_count = ScopeGuard::new_with_data(0, |i| {
- // We now free every element that has been initialized before:
+ // We now free every element that has been initialized before.
// SAFETY: The loop initialized exactly the values from 0..i and since we
// return `Err` below, the caller will consider the memory at `slot` as
// uninitialized.
diff --git a/rust/kernel/ioctl.rs b/rust/kernel/ioctl.rs
index f1d42ab699..cfa7d080b5 100644
--- a/rust/kernel/ioctl.rs
+++ b/rust/kernel/ioctl.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-//! ioctl() number definitions
+//! `ioctl()` number definitions.
//!
//! C header: [`include/asm-generic/ioctl.h`](srctree/include/asm-generic/ioctl.h)
@@ -28,13 +28,13 @@ pub const fn _IO(ty: u32, nr: u32) -> u32 {
_IOC(uapi::_IOC_NONE, ty, nr, 0)
}
-/// Build an ioctl number for an read-only ioctl.
+/// Build an ioctl number for a read-only ioctl.
#[inline(always)]
pub const fn _IOR<T>(ty: u32, nr: u32) -> u32 {
_IOC(uapi::_IOC_READ, ty, nr, core::mem::size_of::<T>())
}
-/// Build an ioctl number for an write-only ioctl.
+/// Build an ioctl number for a write-only ioctl.
#[inline(always)]
pub const fn _IOW<T>(ty: u32, nr: u32) -> u32 {
_IOC(uapi::_IOC_WRITE, ty, nr, core::mem::size_of::<T>())
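As a usage sketch, these helpers pack the transfer direction, payload size, magic type and command number into a single `u32` command value. The magic and command numbers below are made up for illustration:

use kernel::ioctl::{_IOR, _IOW};

// Hypothetical commands for an imaginary device using magic 0x78 ('x'):
const EXAMPLE_GET_VALUE: u32 = _IOR::<u64>(0x78, 0x01); // userspace reads a u64
const EXAMPLE_SET_VALUE: u32 = _IOW::<u64>(0x78, 0x02); // userspace writes a u64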
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 75efe47522..6858e2f8a3 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -14,11 +14,9 @@
#![no_std]
#![feature(allocator_api)]
#![feature(coerce_unsized)]
-#![feature(const_maybe_uninit_zeroed)]
#![feature(dispatch_from_dyn)]
#![feature(new_uninit)]
#![feature(offset_of)]
-#![feature(ptr_metadata)]
#![feature(receiver_trait)]
#![feature(unsize)]
@@ -49,6 +47,7 @@ pub mod std_vendor;
pub mod str;
pub mod sync;
pub mod task;
+pub mod time;
pub mod types;
pub mod workqueue;
@@ -78,7 +77,7 @@ pub trait Module: Sized + Sync + Send {
/// Equivalent to `THIS_MODULE` in the C API.
///
-/// C header: `include/linux/export.h`
+/// C header: [`include/linux/export.h`](srctree/include/linux/export.h)
pub struct ThisModule(*mut bindings::module);
// SAFETY: `THIS_MODULE` may be used from all threads within a module.
@@ -102,3 +101,35 @@ fn panic(info: &core::panic::PanicInfo<'_>) -> ! {
// SAFETY: FFI call.
unsafe { bindings::BUG() };
}
+
+/// Produces a pointer to an object from a pointer to one of its fields.
+///
+/// # Safety
+///
+/// The pointer passed to this macro, and the pointer returned by this macro, must both be in
+/// bounds of the same allocation.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::container_of;
+/// struct Test {
+/// a: u64,
+/// b: u32,
+/// }
+///
+/// let test = Test { a: 10, b: 20 };
+/// let b_ptr = &test.b;
+/// // SAFETY: The pointer points at the `b` field of a `Test`, so the resulting pointer will be
+/// // in-bounds of the same allocation as `b_ptr`.
+/// let test_alias = unsafe { container_of!(b_ptr, Test, b) };
+/// assert!(core::ptr::eq(&test, test_alias));
+/// ```
+#[macro_export]
+macro_rules! container_of {
+ ($ptr:expr, $type:ty, $($f:tt)*) => {{
+ let ptr = $ptr as *const _ as *const u8;
+ let offset: usize = ::core::mem::offset_of!($type, $($f)*);
+ ptr.sub(offset) as *const $type
+ }}
+}
diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs
index 869797745b..265d0e1c13 100644
--- a/rust/kernel/net/phy.rs
+++ b/rust/kernel/net/phy.rs
@@ -4,7 +4,7 @@
//! Network PHY device.
//!
-//! C headers: [`include/linux/phy.h`](../../../../../../../include/linux/phy.h).
+//! C headers: [`include/linux/phy.h`](srctree/include/linux/phy.h).
use crate::{bindings, error::*, prelude::*, str::CStr, types::Opaque};
@@ -16,7 +16,7 @@ use core::marker::PhantomData;
///
/// Some PHY drivers access the state of the PHY's software state machine.
///
-/// [`enum phy_state`]: ../../../../../../../include/linux/phy.h
+/// [`enum phy_state`]: srctree/include/linux/phy.h
#[derive(PartialEq, Eq)]
pub enum DeviceState {
/// PHY device and driver are not ready for anything.
@@ -61,7 +61,7 @@ pub enum DuplexMode {
/// Referencing a `phy_device` using this struct asserts that you are in
/// a context where all methods defined on this struct are safe to call.
///
-/// [`struct phy_device`]: ../../../../../../../include/linux/phy.h
+/// [`struct phy_device`]: srctree/include/linux/phy.h
// During the calls to most functions in [`Driver`], the C side (`PHYLIB`) holds a lock that is
// unique for every instance of [`Device`]. `PHYLIB` uses a different serialization technique for
// [`Driver::resume`] and [`Driver::suspend`]: `PHYLIB` updates `phy_device`'s state with
@@ -486,7 +486,7 @@ impl<T: Driver> Adapter<T> {
///
/// `self.0` is always in a valid state.
///
-/// [`struct phy_driver`]: ../../../../../../../include/linux/phy.h
+/// [`struct phy_driver`]: srctree/include/linux/phy.h
#[repr(transparent)]
pub struct DriverVTable(Opaque<bindings::phy_driver>);
@@ -580,12 +580,12 @@ pub trait Driver {
/// Issues a PHY software reset.
fn soft_reset(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Probes the hardware to determine what abilities it has.
fn get_features(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Returns true if this is a suitable driver for the given phydev.
@@ -597,32 +597,32 @@ pub trait Driver {
/// Configures the advertisement and resets auto-negotiation
/// if auto-negotiation is enabled.
fn config_aneg(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Determines the negotiated speed and duplex.
fn read_status(_dev: &mut Device) -> Result<u16> {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Suspends the hardware, saving state if needed.
fn suspend(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Resumes the hardware, restoring state if needed.
fn resume(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Overrides the default MMD read function for reading a MMD register.
fn read_mmd(_dev: &mut Device, _devnum: u8, _regnum: u16) -> Result<u16> {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Overrides the default MMD write function for writing a MMD register.
fn write_mmd(_dev: &mut Device, _devnum: u8, _regnum: u16, _val: u16) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Callback for notification of link change.
diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
index 7d848b83ad..925ced8fdc 100644
--- a/rust/kernel/str.rs
+++ b/rust/kernel/str.rs
@@ -13,9 +13,102 @@ use crate::{
};
/// Byte string without UTF-8 validity guarantee.
-///
-/// `BStr` is simply an alias to `[u8]`, but has a more evident semantical meaning.
-pub type BStr = [u8];
+#[repr(transparent)]
+pub struct BStr([u8]);
+
+impl BStr {
+ /// Returns the length of this string.
+ #[inline]
+ pub const fn len(&self) -> usize {
+ self.0.len()
+ }
+
+ /// Returns `true` if the string is empty.
+ #[inline]
+ pub const fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Creates a [`BStr`] from a `[u8]`.
+ #[inline]
+ pub const fn from_bytes(bytes: &[u8]) -> &Self {
+ // SAFETY: `BStr` is transparent to `[u8]`.
+ unsafe { &*(bytes as *const [u8] as *const BStr) }
+ }
+}
+
+impl fmt::Display for BStr {
+ /// Formats printable ASCII characters, escaping the rest.
+ ///
+ /// ```
+ /// # use kernel::{fmt, b_str, str::{BStr, CString}};
+ /// let ascii = b_str!("Hello, BStr!");
+ /// let s = CString::try_from_fmt(fmt!("{}", ascii)).unwrap();
+ /// assert_eq!(s.as_bytes(), "Hello, BStr!".as_bytes());
+ ///
+ /// let non_ascii = b_str!("🦀");
+ /// let s = CString::try_from_fmt(fmt!("{}", non_ascii)).unwrap();
+ /// assert_eq!(s.as_bytes(), "\\xf0\\x9f\\xa6\\x80".as_bytes());
+ /// ```
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ for &b in &self.0 {
+ match b {
+ // Common escape codes.
+ b'\t' => f.write_str("\\t")?,
+ b'\n' => f.write_str("\\n")?,
+ b'\r' => f.write_str("\\r")?,
+ // Printable characters.
+ 0x20..=0x7e => f.write_char(b as char)?,
+ _ => write!(f, "\\x{:02x}", b)?,
+ }
+ }
+ Ok(())
+ }
+}
+
+impl fmt::Debug for BStr {
+ /// Formats printable ASCII characters with a double quote on either end,
+ /// escaping the rest.
+ ///
+ /// ```
+ /// # use kernel::{fmt, b_str, str::{BStr, CString}};
+ /// // Embedded double quotes are escaped.
+ /// let ascii = b_str!("Hello, \"BStr\"!");
+ /// let s = CString::try_from_fmt(fmt!("{:?}", ascii)).unwrap();
+ /// assert_eq!(s.as_bytes(), "\"Hello, \\\"BStr\\\"!\"".as_bytes());
+ ///
+ /// let non_ascii = b_str!("😺");
+ /// let s = CString::try_from_fmt(fmt!("{:?}", non_ascii)).unwrap();
+ /// assert_eq!(s.as_bytes(), "\"\\xf0\\x9f\\x98\\xba\"".as_bytes());
+ /// ```
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_char('"')?;
+ for &b in &self.0 {
+ match b {
+ // Common escape codes.
+ b'\t' => f.write_str("\\t")?,
+ b'\n' => f.write_str("\\n")?,
+ b'\r' => f.write_str("\\r")?,
+ // String escape characters.
+ b'\"' => f.write_str("\\\"")?,
+ b'\\' => f.write_str("\\\\")?,
+ // Printable characters.
+ 0x20..=0x7e => f.write_char(b as char)?,
+ _ => write!(f, "\\x{:02x}", b)?,
+ }
+ }
+ f.write_char('"')
+ }
+}
+
+impl Deref for BStr {
+ type Target = [u8];
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
/// Creates a new [`BStr`] from a string literal.
///
@@ -33,7 +126,7 @@ pub type BStr = [u8];
macro_rules! b_str {
($str:literal) => {{
const S: &'static str = $str;
- const C: &'static $crate::str::BStr = S.as_bytes();
+ const C: &'static $crate::str::BStr = $crate::str::BStr::from_bytes(S.as_bytes());
C
}};
}
@@ -149,13 +242,13 @@ impl CStr {
self.0.as_ptr() as _
}
- /// Convert the string to a byte slice without the trailing 0 byte.
+ /// Convert the string to a byte slice without the trailing `NUL` byte.
#[inline]
pub fn as_bytes(&self) -> &[u8] {
&self.0[..self.len()]
}
- /// Convert the string to a byte slice containing the trailing 0 byte.
+ /// Convert the string to a byte slice containing the trailing `NUL` byte.
#[inline]
pub const fn as_bytes_with_nul(&self) -> &[u8] {
&self.0
@@ -191,9 +284,9 @@ impl CStr {
/// ```
/// # use kernel::c_str;
/// # use kernel::str::CStr;
+ /// let bar = c_str!("ツ");
/// // SAFETY: String literals are guaranteed to be valid UTF-8
/// // by the Rust compiler.
- /// let bar = c_str!("ツ");
/// assert_eq!(unsafe { bar.as_str_unchecked() }, "ツ");
/// ```
#[inline]
@@ -271,7 +364,7 @@ impl fmt::Debug for CStr {
impl AsRef<BStr> for CStr {
#[inline]
fn as_ref(&self) -> &BStr {
- self.as_bytes()
+ BStr::from_bytes(self.as_bytes())
}
}
@@ -280,7 +373,7 @@ impl Deref for CStr {
#[inline]
fn deref(&self) -> &Self::Target {
- self.as_bytes()
+ self.as_ref()
}
}
@@ -327,7 +420,7 @@ where
#[inline]
fn index(&self, index: Idx) -> &Self::Output {
- &self.as_bytes()[index]
+ &self.as_ref()[index]
}
}
@@ -357,6 +450,21 @@ macro_rules! c_str {
#[cfg(test)]
mod tests {
use super::*;
+ use alloc::format;
+
+ const ALL_ASCII_CHARS: &'static str =
+ "\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0a\\x0b\\x0c\\x0d\\x0e\\x0f\
+ \\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f \
+ !\"#$%&'()*+,-./0123456789:;<=>?@\
+ ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f\
+ \\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d\\x8e\\x8f\
+ \\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b\\x9c\\x9d\\x9e\\x9f\
+ \\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9\\xaa\\xab\\xac\\xad\\xae\\xaf\
+ \\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\
+ \\xc0\\xc1\\xc2\\xc3\\xc4\\xc5\\xc6\\xc7\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\
+ \\xd0\\xd1\\xd2\\xd3\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\
+ \\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef\
+ \\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd\\xfe\\xff";
#[test]
fn test_cstr_to_str() {
@@ -381,6 +489,69 @@ mod tests {
let unchecked_str = unsafe { checked_cstr.as_str_unchecked() };
assert_eq!(unchecked_str, "🐧");
}
+
+ #[test]
+ fn test_cstr_display() {
+ let hello_world = CStr::from_bytes_with_nul(b"hello, world!\0").unwrap();
+ assert_eq!(format!("{}", hello_world), "hello, world!");
+ let non_printables = CStr::from_bytes_with_nul(b"\x01\x09\x0a\0").unwrap();
+ assert_eq!(format!("{}", non_printables), "\\x01\\x09\\x0a");
+ let non_ascii = CStr::from_bytes_with_nul(b"d\xe9j\xe0 vu\0").unwrap();
+ assert_eq!(format!("{}", non_ascii), "d\\xe9j\\xe0 vu");
+ let good_bytes = CStr::from_bytes_with_nul(b"\xf0\x9f\xa6\x80\0").unwrap();
+ assert_eq!(format!("{}", good_bytes), "\\xf0\\x9f\\xa6\\x80");
+ }
+
+ #[test]
+ fn test_cstr_display_all_bytes() {
+ let mut bytes: [u8; 256] = [0; 256];
+ // fill `bytes` with [1..=255] + [0]
+ for i in u8::MIN..=u8::MAX {
+ bytes[i as usize] = i.wrapping_add(1);
+ }
+ let cstr = CStr::from_bytes_with_nul(&bytes).unwrap();
+ assert_eq!(format!("{}", cstr), ALL_ASCII_CHARS);
+ }
+
+ #[test]
+ fn test_cstr_debug() {
+ let hello_world = CStr::from_bytes_with_nul(b"hello, world!\0").unwrap();
+ assert_eq!(format!("{:?}", hello_world), "\"hello, world!\"");
+ let non_printables = CStr::from_bytes_with_nul(b"\x01\x09\x0a\0").unwrap();
+ assert_eq!(format!("{:?}", non_printables), "\"\\x01\\x09\\x0a\"");
+ let non_ascii = CStr::from_bytes_with_nul(b"d\xe9j\xe0 vu\0").unwrap();
+ assert_eq!(format!("{:?}", non_ascii), "\"d\\xe9j\\xe0 vu\"");
+ let good_bytes = CStr::from_bytes_with_nul(b"\xf0\x9f\xa6\x80\0").unwrap();
+ assert_eq!(format!("{:?}", good_bytes), "\"\\xf0\\x9f\\xa6\\x80\"");
+ }
+
+ #[test]
+ fn test_bstr_display() {
+ let hello_world = BStr::from_bytes(b"hello, world!");
+ assert_eq!(format!("{}", hello_world), "hello, world!");
+ let escapes = BStr::from_bytes(b"_\t_\n_\r_\\_\'_\"_");
+ assert_eq!(format!("{}", escapes), "_\\t_\\n_\\r_\\_'_\"_");
+ let others = BStr::from_bytes(b"\x01");
+ assert_eq!(format!("{}", others), "\\x01");
+ let non_ascii = BStr::from_bytes(b"d\xe9j\xe0 vu");
+ assert_eq!(format!("{}", non_ascii), "d\\xe9j\\xe0 vu");
+ let good_bytes = BStr::from_bytes(b"\xf0\x9f\xa6\x80");
+ assert_eq!(format!("{}", good_bytes), "\\xf0\\x9f\\xa6\\x80");
+ }
+
+ #[test]
+ fn test_bstr_debug() {
+ let hello_world = BStr::from_bytes(b"hello, world!");
+ assert_eq!(format!("{:?}", hello_world), "\"hello, world!\"");
+ let escapes = BStr::from_bytes(b"_\t_\n_\r_\\_\'_\"_");
+ assert_eq!(format!("{:?}", escapes), "\"_\\t_\\n_\\r_\\\\_'_\\\"_\"");
+ let others = BStr::from_bytes(b"\x01");
+ assert_eq!(format!("{:?}", others), "\"\\x01\"");
+ let non_ascii = BStr::from_bytes(b"d\xe9j\xe0 vu");
+ assert_eq!(format!("{:?}", non_ascii), "\"d\\xe9j\\xe0 vu\"");
+ let good_bytes = BStr::from_bytes(b"\xf0\x9f\xa6\x80");
+ assert_eq!(format!("{:?}", good_bytes), "\"\\xf0\\x9f\\xa6\\x80\"");
+ }
}
/// Allows formatting of [`fmt::Arguments`] into a raw buffer.
@@ -449,7 +620,7 @@ impl RawFormatter {
self.pos as _
}
- /// Return the number of bytes written to the formatter.
+ /// Returns the number of bytes written to the formatter.
pub(crate) fn bytes_written(&self) -> usize {
self.pos - self.beg
}
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index d219ee518e..c983f63fd5 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -13,8 +13,9 @@ pub mod lock;
mod locked_by;
pub use arc::{Arc, ArcBorrow, UniqueArc};
-pub use condvar::CondVar;
-pub use lock::{mutex::Mutex, spinlock::SpinLock};
+pub use condvar::{new_condvar, CondVar, CondVarTimeoutResult};
+pub use lock::mutex::{new_mutex, Mutex};
+pub use lock::spinlock::{new_spinlock, SpinLock};
pub use locked_by::LockedBy;
/// Represents a lockdep class. It's a wrapper around C's `lock_class_key`.
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index 77cdbcf7bd..7d4c4bf583 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -30,7 +30,7 @@ use core::{
mem::{ManuallyDrop, MaybeUninit},
ops::{Deref, DerefMut},
pin::Pin,
- ptr::{NonNull, Pointee},
+ ptr::NonNull,
};
use macros::pin_data;
@@ -56,7 +56,7 @@ mod std_vendor;
/// b: u32,
/// }
///
-/// // Create a ref-counted instance of `Example`.
+/// // Create a refcounted instance of `Example`.
/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
///
/// // Get a new pointer to `obj` and increment the refcount.
@@ -239,22 +239,20 @@ impl<T: ?Sized> Arc<T> {
// binary, so its layout is not so large that it can trigger arithmetic overflow.
let val_offset = unsafe { refcount_layout.extend(val_layout).unwrap_unchecked().1 };
- let metadata: <T as Pointee>::Metadata = core::ptr::metadata(ptr);
- // SAFETY: The metadata of `T` and `ArcInner<T>` is the same because `ArcInner` is a struct
- // with `T` as its last field.
+ // Pointer casts leave the metadata unchanged. This is okay because the metadata of `T` and
+ // `ArcInner<T>` is the same since `ArcInner` is a struct with `T` as its last field.
//
// This is documented at:
// <https://doc.rust-lang.org/std/ptr/trait.Pointee.html>.
- let metadata: <ArcInner<T> as Pointee>::Metadata =
- unsafe { core::mem::transmute_copy(&metadata) };
+ let ptr = ptr as *const ArcInner<T>;
+
// SAFETY: The pointer is in-bounds of an allocation both before and after offsetting the
// pointer, since it originates from a previous call to `Arc::into_raw` and is still valid.
- let ptr = unsafe { (ptr as *mut u8).sub(val_offset) as *mut () };
- let ptr = core::ptr::from_raw_parts_mut(ptr, metadata);
+ let ptr = unsafe { ptr.byte_sub(val_offset) };
// SAFETY: By the safety requirements we know that `ptr` came from `Arc::into_raw`, so the
// reference count held then will be owned by the new `Arc` object.
- unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
+ unsafe { Self::from_inner(NonNull::new_unchecked(ptr.cast_mut())) }
}
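
The rewrite above only changes how `Arc::from_raw` recovers the `ArcInner` pointer internally; the caller-visible round trip is unchanged. A minimal sketch, assuming the pointer crosses an FFI boundary in between (`round_trip` is illustrative):

```rust
use kernel::prelude::*;
use kernel::sync::Arc;

fn round_trip() -> Result {
    let arc = Arc::try_new(42u32)?;
    // Hand the allocation across an FFI boundary as a raw pointer.
    let ptr: *const u32 = Arc::into_raw(arc);
    // SAFETY: `ptr` came from `Arc::into_raw` above and still carries the
    // refcount that call transferred to us.
    let arc = unsafe { Arc::from_raw(ptr) };
    drop(arc); // The refcount drops back to zero and the allocation is freed.
    Ok(())
}
```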
/// Returns an [`ArcBorrow`] from the given [`Arc`].
@@ -365,12 +363,12 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {
/// A borrowed reference to an [`Arc`] instance.
///
/// For cases when one doesn't ever need to increment the refcount on the allocation, it is simpler
-/// to use just `&T`, which we can trivially get from an `Arc<T>` instance.
+/// to use just `&T`, which we can trivially get from an [`Arc<T>`] instance.
///
/// However, when one may need to increment the refcount, it is preferable to use an `ArcBorrow<T>`
/// over `&Arc<T>` because the latter results in a double-indirection: a pointer (shared reference)
-/// to a pointer (`Arc<T>`) to the object (`T`). An [`ArcBorrow`] eliminates this double
-/// indirection while still allowing one to increment the refcount and getting an `Arc<T>` when/if
+/// to a pointer ([`Arc<T>`]) to the object (`T`). An [`ArcBorrow`] eliminates this double
+/// indirection while still allowing one to increment the refcount and getting an [`Arc<T>`] when/if
/// needed.
///
/// # Invariants
@@ -510,7 +508,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> {
/// # test().unwrap();
/// ```
///
-/// In the following example we first allocate memory for a ref-counted `Example` but we don't
+/// In the following example we first allocate memory for a refcounted `Example` but we don't
/// initialise it on allocation. We do initialise it later with a call to [`UniqueArc::write`],
/// followed by a conversion to `Arc<Example>`. This is particularly useful when allocation happens
/// in one context (e.g., sleepable) and initialisation in another (e.g., atomic):
@@ -560,7 +558,7 @@ impl<T> UniqueArc<T> {
/// Tries to allocate a new [`UniqueArc`] instance.
pub fn try_new(value: T) -> Result<Self, AllocError> {
Ok(Self {
- // INVARIANT: The newly-created object has a ref-count of 1.
+ // INVARIANT: The newly-created object has a refcount of 1.
inner: Arc::try_new(value)?,
})
}
@@ -574,7 +572,7 @@ impl<T> UniqueArc<T> {
data <- init::uninit::<T, AllocError>(),
}? AllocError))?;
Ok(UniqueArc {
- // INVARIANT: The newly-created object has a ref-count of 1.
+ // INVARIANT: The newly-created object has a refcount of 1.
// SAFETY: The pointer from the `Box` is valid.
inner: unsafe { Arc::from_inner(Box::leak(inner).into()) },
})
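
A condensed sketch of the allocate-then-initialise pattern described above, assuming allocation happens in a sleepable context and the write possibly in an atomic one; `Example` and `two_phase` are illustrative:

```rust
use kernel::prelude::*;
use kernel::sync::{Arc, UniqueArc};

struct Example {
    a: u32,
    b: u32,
}

fn two_phase() -> Result<Arc<Example>> {
    // Phase 1 (e.g. sleepable context): allocate, but do not initialise.
    let uninit = UniqueArc::<Example>::try_new_uninit()?;
    // Phase 2 (e.g. atomic context): initialise, then convert to a shared `Arc`.
    let unique = uninit.write(Example { a: 10, b: 20 });
    Ok(unique.into())
}
```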
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
index f65e19d5a3..0c3671caff 100644
--- a/rust/kernel/sync/condvar.rs
+++ b/rust/kernel/sync/condvar.rs
@@ -6,8 +6,18 @@
//! variable.
use super::{lock::Backend, lock::Guard, LockClassKey};
-use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque};
+use crate::{
+ bindings,
+ init::PinInit,
+ pin_init,
+ str::CStr,
+ task::{MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE, TASK_NORMAL, TASK_UNINTERRUPTIBLE},
+ time::Jiffies,
+ types::Opaque,
+};
+use core::ffi::{c_int, c_long};
use core::marker::PhantomPinned;
+use core::ptr;
use macros::pin_data;
/// Creates a [`CondVar`] initialiser with the given name and a newly-created lock class.
@@ -17,6 +27,7 @@ macro_rules! new_condvar {
$crate::sync::CondVar::new($crate::optional_name!($($name)?), $crate::static_lock_class!())
};
}
+pub use new_condvar;
/// A conditional variable.
///
@@ -34,8 +45,7 @@ macro_rules! new_condvar {
/// The following is an example of using a condvar with a mutex:
///
/// ```
-/// use kernel::sync::{CondVar, Mutex};
-/// use kernel::{new_condvar, new_mutex};
+/// use kernel::sync::{new_condvar, new_mutex, CondVar, Mutex};
///
/// #[pin_data]
/// pub struct Example {
@@ -73,10 +83,12 @@ macro_rules! new_condvar {
#[pin_data]
pub struct CondVar {
#[pin]
- pub(crate) wait_list: Opaque<bindings::wait_queue_head>,
+ pub(crate) wait_queue_head: Opaque<bindings::wait_queue_head>,
/// A condvar needs to be pinned because it contains a [`struct list_head`] that is
/// self-referential, so it cannot be safely moved once it is initialised.
+ ///
+ /// [`struct list_head`]: srctree/include/linux/types.h
#[pin]
_pin: PhantomPinned,
}
@@ -96,28 +108,35 @@ impl CondVar {
_pin: PhantomPinned,
// SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
// static lifetimes so they live indefinitely.
- wait_list <- Opaque::ffi_init(|slot| unsafe {
+ wait_queue_head <- Opaque::ffi_init(|slot| unsafe {
bindings::__init_waitqueue_head(slot, name.as_char_ptr(), key.as_ptr())
}),
})
}
- fn wait_internal<T: ?Sized, B: Backend>(&self, wait_state: u32, guard: &mut Guard<'_, T, B>) {
+ fn wait_internal<T: ?Sized, B: Backend>(
+ &self,
+ wait_state: c_int,
+ guard: &mut Guard<'_, T, B>,
+ timeout_in_jiffies: c_long,
+ ) -> c_long {
let wait = Opaque::<bindings::wait_queue_entry>::uninit();
// SAFETY: `wait` points to valid memory.
unsafe { bindings::init_wait(wait.get()) };
- // SAFETY: Both `wait` and `wait_list` point to valid memory.
+ // SAFETY: Both `wait` and `wait_queue_head` point to valid memory.
unsafe {
- bindings::prepare_to_wait_exclusive(self.wait_list.get(), wait.get(), wait_state as _)
+ bindings::prepare_to_wait_exclusive(self.wait_queue_head.get(), wait.get(), wait_state)
};
- // SAFETY: No arguments, switches to another thread.
- guard.do_unlocked(|| unsafe { bindings::schedule() });
+ // SAFETY: Switches to another thread. The timeout can be any number.
+ let ret = guard.do_unlocked(|| unsafe { bindings::schedule_timeout(timeout_in_jiffies) });
+
+ // SAFETY: Both `wait` and `wait_queue_head` point to valid memory.
+ unsafe { bindings::finish_wait(self.wait_queue_head.get(), wait.get()) };
- // SAFETY: Both `wait` and `wait_list` point to valid memory.
- unsafe { bindings::finish_wait(self.wait_list.get(), wait.get()) };
+ ret
}
/// Releases the lock and waits for a notification in uninterruptible mode.
@@ -127,7 +146,7 @@ impl CondVar {
/// [`CondVar::notify_one`] or [`CondVar::notify_all`]. Note that it may also wake up
/// spuriously.
pub fn wait<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) {
- self.wait_internal(bindings::TASK_UNINTERRUPTIBLE, guard);
+ self.wait_internal(TASK_UNINTERRUPTIBLE, guard, MAX_SCHEDULE_TIMEOUT);
}
/// Releases the lock and waits for a notification in interruptible mode.
@@ -138,29 +157,60 @@ impl CondVar {
/// Returns whether there is a signal pending.
#[must_use = "wait_interruptible returns if a signal is pending, so the caller must check the return value"]
pub fn wait_interruptible<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) -> bool {
- self.wait_internal(bindings::TASK_INTERRUPTIBLE, guard);
+ self.wait_internal(TASK_INTERRUPTIBLE, guard, MAX_SCHEDULE_TIMEOUT);
crate::current!().signal_pending()
}
- /// Calls the kernel function to notify the appropriate number of threads with the given flags.
- fn notify(&self, count: i32, flags: u32) {
- // SAFETY: `wait_list` points to valid memory.
+ /// Releases the lock and waits for a notification in interruptible mode, or until the given
+ /// timeout elapses.
+ ///
+ /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the
+ /// thread to sleep. It wakes up when notified by [`CondVar::notify_one`] or
+ /// [`CondVar::notify_all`], or when a timeout occurs, or when the thread receives a signal.
+ #[must_use = "wait_interruptible_timeout returns if a signal is pending, so the caller must check the return value"]
+ pub fn wait_interruptible_timeout<T: ?Sized, B: Backend>(
+ &self,
+ guard: &mut Guard<'_, T, B>,
+ jiffies: Jiffies,
+ ) -> CondVarTimeoutResult {
+ let jiffies = jiffies.try_into().unwrap_or(MAX_SCHEDULE_TIMEOUT);
+ let res = self.wait_internal(TASK_INTERRUPTIBLE, guard, jiffies);
+
+ match (res as Jiffies, crate::current!().signal_pending()) {
+ (jiffies, true) => CondVarTimeoutResult::Signal { jiffies },
+ (0, false) => CondVarTimeoutResult::Timeout,
+ (jiffies, false) => CondVarTimeoutResult::Woken { jiffies },
+ }
+ }
+
+ /// Calls the kernel function to notify the appropriate number of threads.
+ fn notify(&self, count: c_int) {
+ // SAFETY: `wait_queue_head` points to valid memory.
unsafe {
bindings::__wake_up(
- self.wait_list.get(),
- bindings::TASK_NORMAL,
+ self.wait_queue_head.get(),
+ TASK_NORMAL,
count,
- flags as _,
+ ptr::null_mut(),
)
};
}
+ /// Calls the kernel function to notify one thread synchronously.
+ ///
+ /// This method behaves like `notify_one`, except that it hints to the scheduler that the
+ /// current thread is about to go to sleep, so it should schedule the target thread on the same
+ /// CPU.
+ pub fn notify_sync(&self) {
+ // SAFETY: `wait_queue_head` points to valid memory.
+ unsafe { bindings::__wake_up_sync(self.wait_queue_head.get(), TASK_NORMAL) };
+ }
+
/// Wakes a single waiter up, if any.
///
/// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
/// completely (as opposed to automatically waking up the next waiter).
pub fn notify_one(&self) {
- self.notify(1, 0);
+ self.notify(1);
}
/// Wakes all waiters up, if any.
@@ -168,6 +218,22 @@ impl CondVar {
/// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
/// completely (as opposed to automatically waking up the next waiter).
pub fn notify_all(&self) {
- self.notify(0, 0);
+ self.notify(0);
}
}
+
+/// The return type of [`CondVar::wait_interruptible_timeout`].
+pub enum CondVarTimeoutResult {
+ /// The timeout was reached.
+ Timeout,
+ /// Somebody woke us up.
+ Woken {
+ /// Remaining sleep duration.
+ jiffies: Jiffies,
+ },
+ /// A signal occurred.
+ Signal {
+ /// Remaining sleep duration.
+ jiffies: Jiffies,
+ },
+}
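
A sketch of how the new timeout variant might be used; the `value`/`value_changed` fields are assumptions mirroring the `Example` struct in the `CondVar` documentation above, and `wait_for_value` is illustrative:

```rust
use kernel::sync::{CondVar, CondVarTimeoutResult, Mutex};
use kernel::time::msecs_to_jiffies;

/// Waits up to 100ms for `*value` to become `true`; returns whether it did.
fn wait_for_value(value: &Mutex<bool>, value_changed: &CondVar) -> bool {
    let mut guard = value.lock();
    while !*guard {
        match value_changed.wait_interruptible_timeout(&mut guard, msecs_to_jiffies(100)) {
            // Woken (possibly spuriously): loop around and re-check the condition.
            CondVarTimeoutResult::Woken { .. } => (),
            // Timed out or a signal arrived: give up; the caller may retry.
            CondVarTimeoutResult::Timeout | CondVarTimeoutResult::Signal { .. } => return false,
        }
    }
    true
}
```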
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index f12a684bc9..5b5c8efe42 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -21,14 +21,21 @@ pub mod spinlock;
/// # Safety
///
/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
-/// is owned, that is, between calls to `lock` and `unlock`.
-/// - Implementers must also ensure that `relock` uses the same locking method as the original
-/// lock operation.
+/// is owned, that is, between calls to [`lock`] and [`unlock`].
+/// - Implementers must also ensure that [`relock`] uses the same locking method as the original
+/// lock operation.
+///
+/// [`lock`]: Backend::lock
+/// [`unlock`]: Backend::unlock
+/// [`relock`]: Backend::relock
pub unsafe trait Backend {
/// The state required by the lock.
type State;
- /// The state required to be kept between lock and unlock.
+ /// The state required to be kept between [`lock`] and [`unlock`].
+ ///
+ /// [`lock`]: Backend::lock
+ /// [`unlock`]: Backend::unlock
type GuardState;
/// Initialises the lock.
@@ -139,7 +146,7 @@ pub struct Guard<'a, T: ?Sized, B: Backend> {
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}
impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
- pub(crate) fn do_unlocked(&mut self, cb: impl FnOnce()) {
+ pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
// SAFETY: The caller owns the lock, so it is safe to unlock it.
unsafe { B::unlock(self.lock.state.get(), &self.state) };
@@ -147,7 +154,7 @@ impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
let _relock =
ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) });
- cb();
+ cb()
}
}
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 8c524a3ec4..ef4c4634d2 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -17,6 +17,7 @@ macro_rules! new_mutex {
$inner, $crate::optional_name!($($name)?), $crate::static_lock_class!())
};
}
+pub use new_mutex;
/// A mutual exclusion primitive.
///
@@ -35,7 +36,7 @@ macro_rules! new_mutex {
/// contains an inner struct (`Inner`) that is protected by a mutex.
///
/// ```
-/// use kernel::{init::InPlaceInit, init::PinInit, new_mutex, pin_init, sync::Mutex};
+/// use kernel::sync::{new_mutex, Mutex};
///
/// struct Inner {
/// a: u32,
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index 068535ce1b..0b22c63563 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -17,6 +17,7 @@ macro_rules! new_spinlock {
$inner, $crate::optional_name!($($name)?), $crate::static_lock_class!())
};
}
+pub use new_spinlock;
/// A spinlock.
///
@@ -33,7 +34,7 @@ macro_rules! new_spinlock {
/// contains an inner struct (`Inner`) that is protected by a spinlock.
///
/// ```
-/// use kernel::{init::InPlaceInit, init::PinInit, new_spinlock, pin_init, sync::SpinLock};
+/// use kernel::sync::{new_spinlock, SpinLock};
///
/// struct Inner {
/// a: u32,
@@ -112,7 +113,7 @@ unsafe impl super::Backend for SpinLockBackend {
unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
// SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
- // caller is the owner of the mutex.
+ // caller is the owner of the spinlock.
unsafe { bindings::spin_unlock(ptr) }
}
}
diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs
index b17ee5cd98..babc731bd5 100644
--- a/rust/kernel/sync/locked_by.rs
+++ b/rust/kernel/sync/locked_by.rs
@@ -9,14 +9,17 @@ use core::{cell::UnsafeCell, mem::size_of, ptr};
/// Allows access to some data to be serialised by a lock that does not wrap it.
///
/// In most cases, data protected by a lock is wrapped by the appropriate lock type, e.g.,
-/// [`super::Mutex`] or [`super::SpinLock`]. [`LockedBy`] is meant for cases when this is not
-/// possible. For example, if a container has a lock and some data in the contained elements needs
+/// [`Mutex`] or [`SpinLock`]. [`LockedBy`] is meant for cases when this is not possible.
+/// For example, if a container has a lock and some data in the contained elements needs
/// to be protected by the same lock.
///
/// [`LockedBy`] wraps the data in lieu of another locking primitive, and only allows access to it
/// when the caller shows evidence that the 'external' lock is locked. It panics if the evidence
/// refers to the wrong instance of the lock.
///
+/// [`Mutex`]: super::Mutex
+/// [`SpinLock`]: super::SpinLock
+///
/// # Examples
///
/// The following is an example for illustrative purposes: `InnerDirectory::bytes_used` is an
diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs
index 9451932d5d..ca6e7e31d7 100644
--- a/rust/kernel/task.rs
+++ b/rust/kernel/task.rs
@@ -5,7 +5,23 @@
//! C header: [`include/linux/sched.h`](srctree/include/linux/sched.h).
use crate::{bindings, types::Opaque};
-use core::{marker::PhantomData, ops::Deref, ptr};
+use core::{
+ ffi::{c_int, c_long, c_uint},
+ marker::PhantomData,
+ ops::Deref,
+ ptr,
+};
+
+/// A sentinel value used for infinite timeouts.
+pub const MAX_SCHEDULE_TIMEOUT: c_long = c_long::MAX;
+
+/// Bitmask for tasks that are sleeping in an interruptible state.
+pub const TASK_INTERRUPTIBLE: c_int = bindings::TASK_INTERRUPTIBLE as c_int;
+/// Bitmask for tasks that are sleeping in an uninterruptible state.
+pub const TASK_UNINTERRUPTIBLE: c_int = bindings::TASK_UNINTERRUPTIBLE as c_int;
+/// Convenience constant for waking up tasks regardless of whether they are in interruptible or
+/// uninterruptible sleep.
+pub const TASK_NORMAL: c_uint = bindings::TASK_NORMAL as c_uint;
/// Returns the currently running task.
#[macro_export]
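
The constants above are re-typed versions of the C task-state bits, sized for direct use with the scheduler FFI; the condvar code above passes them straight through. The `current!` macro pairs with them when checking for pending signals after an interruptible sleep. A minimal sketch; `interrupted` is illustrative:

```rust
use kernel::current;

fn interrupted() -> bool {
    // `current!` yields a reference to the running task; `signal_pending`
    // reports whether a signal is waiting to be delivered to it.
    current!().signal_pending()
}
```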
@@ -23,7 +39,7 @@ macro_rules! current {
///
/// All instances are valid tasks created by the C portion of the kernel.
///
-/// Instances of this type are always ref-counted, that is, a call to `get_task_struct` ensures
+/// Instances of this type are always refcounted, that is, a call to `get_task_struct` ensures
/// that the allocation remains valid at least until the matching call to `put_task_struct`.
///
/// # Examples
@@ -116,7 +132,7 @@ impl Task {
/// Returns the group leader of the given task.
pub fn group_leader(&self) -> &Task {
// SAFETY: By the type invariant, we know that `self.0` is a valid task. Valid tasks always
- // have a valid group_leader.
+ // have a valid `group_leader`.
let ptr = unsafe { *ptr::addr_of!((*self.0.get()).group_leader) };
// SAFETY: The lifetime of the returned task reference is tied to the lifetime of `self`,
@@ -147,7 +163,7 @@ impl Task {
}
}
-// SAFETY: The type invariants guarantee that `Task` is always ref-counted.
+// SAFETY: The type invariants guarantee that `Task` is always refcounted.
unsafe impl crate::types::AlwaysRefCounted for Task {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference means that the refcount is nonzero.
diff --git a/rust/kernel/time.rs b/rust/kernel/time.rs
new file mode 100644
index 0000000000..25a896eed4
--- /dev/null
+++ b/rust/kernel/time.rs
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Time related primitives.
+//!
+//! This module contains the kernel APIs related to time and timers that
+//! have been ported or wrapped for usage by Rust code in the kernel.
+
+/// The time unit of the Linux kernel. One jiffy equals (1/HZ) of a second.
+pub type Jiffies = core::ffi::c_ulong;
+
+/// The millisecond time unit.
+pub type Msecs = core::ffi::c_uint;
+
+/// Converts milliseconds to jiffies.
+#[inline]
+pub fn msecs_to_jiffies(msecs: Msecs) -> Jiffies {
+ // SAFETY: The `__msecs_to_jiffies` function is always safe to call no
+ // matter what the argument is.
+ unsafe { bindings::__msecs_to_jiffies(msecs) }
+}
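
A minimal usage sketch; `hundred_ms` and the 100ms value are arbitrary illustrations:

```rust
use kernel::time::{msecs_to_jiffies, Jiffies, Msecs};

fn hundred_ms() -> Jiffies {
    // Convert a millisecond delay into the jiffies that timeout APIs such as
    // `CondVar::wait_interruptible_timeout` expect.
    let delay: Msecs = 100;
    msecs_to_jiffies(delay)
}
```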
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index fdb778e65d..aa77bad9bc 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -46,6 +46,25 @@ pub trait ForeignOwnable: Sized {
/// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] for
/// this object must have been dropped.
unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self;
+
+ /// Tries to convert a foreign-owned object back to a Rust-owned one.
+ ///
+ /// A convenience wrapper over [`ForeignOwnable::from_foreign`] that returns [`None`] if `ptr`
+ /// is null.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must either be null or satisfy the safety requirements for
+ /// [`ForeignOwnable::from_foreign`].
+ unsafe fn try_from_foreign(ptr: *const core::ffi::c_void) -> Option<Self> {
+ if ptr.is_null() {
+ None
+ } else {
+ // SAFETY: Since `ptr` is not null here, then `ptr` satisfies the safety requirements
+ // of `from_foreign` given the safety requirements of this function.
+ unsafe { Some(Self::from_foreign(ptr)) }
+ }
+ }
}
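
A sketch of the intended call pattern, assuming `ptr` is an optional context pointer that was earlier produced by `Box::into_foreign` (or left null); `reclaim` is illustrative:

```rust
use kernel::prelude::*;
use kernel::types::ForeignOwnable;

/// Reclaims an optional `Box<u32>` previously handed out to C code.
///
/// # Safety
///
/// `ptr` must be null or originate from a call to `Box::<u32>::into_foreign`.
unsafe fn reclaim(ptr: *const core::ffi::c_void) -> Option<Box<u32>> {
    // SAFETY: By this function's contract, `ptr` is null or satisfies the
    // requirements of `from_foreign`, which is exactly what
    // `try_from_foreign` requires.
    unsafe { <Box<u32> as ForeignOwnable>::try_from_foreign(ptr) }
}
```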
impl<T: 'static> ForeignOwnable for Box<T> {
@@ -90,6 +109,7 @@ impl ForeignOwnable for () {
///
/// In the example below, we have multiple exit paths and we want to log regardless of which one is
/// taken:
+///
/// ```
/// # use kernel::types::ScopeGuard;
/// fn example1(arg: bool) {
@@ -108,6 +128,7 @@ impl ForeignOwnable for () {
///
/// In the example below, we want to log the same message on all early exits but a different one on
/// the main exit path:
+///
/// ```
/// # use kernel::types::ScopeGuard;
/// fn example2(arg: bool) {
@@ -129,6 +150,7 @@ impl ForeignOwnable for () {
///
/// In the example below, we need a mutable object (the vector) to be accessible within the log
/// function, so we wrap it in the [`ScopeGuard`]:
+///
/// ```
/// # use kernel::types::ScopeGuard;
/// fn example3(arg: bool) -> Result {
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index 4983978773..480cb292e7 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -12,19 +12,19 @@
//!
//! # The raw API
//!
-//! The raw API consists of the `RawWorkItem` trait, where the work item needs to provide an
+//! The raw API consists of the [`RawWorkItem`] trait, where the work item needs to provide an
//! arbitrary function that knows how to enqueue the work item. It should usually not be used
//! directly, but if you want to, you can use it without using the pieces from the safe API.
//!
//! # The safe API
//!
-//! The safe API is used via the `Work` struct and `WorkItem` traits. Furthermore, it also includes
-//! a trait called `WorkItemPointer`, which is usually not used directly by the user.
+//! The safe API is used via the [`Work`] struct and the [`WorkItem`] trait. It also includes
+//! a trait called [`WorkItemPointer`], which is usually not used directly by the user.
//!
-//! * The `Work` struct is the Rust wrapper for the C `work_struct` type.
-//! * The `WorkItem` trait is implemented for structs that can be enqueued to a workqueue.
-//! * The `WorkItemPointer` trait is implemented for the pointer type that points at a something
-//! that implements `WorkItem`.
+//! * The [`Work`] struct is the Rust wrapper for the C `work_struct` type.
+//! * The [`WorkItem`] trait is implemented for structs that can be enqueued to a workqueue.
+//! * The [`WorkItemPointer`] trait is implemented for the pointer type that points at something
+//! that implements [`WorkItem`].
//!
//! ## Example
//!
@@ -35,8 +35,7 @@
//! ```
//! use kernel::prelude::*;
//! use kernel::sync::Arc;
-//! use kernel::workqueue::{self, Work, WorkItem};
-//! use kernel::{impl_has_work, new_work};
+//! use kernel::workqueue::{self, impl_has_work, new_work, Work, WorkItem};
//!
//! #[pin_data]
//! struct MyStruct {
@@ -78,8 +77,7 @@
//! ```
//! use kernel::prelude::*;
//! use kernel::sync::Arc;
-//! use kernel::workqueue::{self, Work, WorkItem};
-//! use kernel::{impl_has_work, new_work};
+//! use kernel::workqueue::{self, impl_has_work, new_work, Work, WorkItem};
//!
//! #[pin_data]
//! struct MyStruct {
@@ -147,6 +145,7 @@ macro_rules! new_work {
$crate::workqueue::Work::new($crate::optional_name!($($name)?), $crate::static_lock_class!())
};
}
+pub use new_work;
/// A kernel work queue.
///
@@ -168,7 +167,7 @@ impl Queue {
/// # Safety
///
/// The caller must ensure that the provided raw pointer is not dangling, that it points at a
- /// valid workqueue, and that it remains valid until the end of 'a.
+ /// valid workqueue, and that it remains valid until the end of `'a`.
pub unsafe fn from_raw<'a>(ptr: *const bindings::workqueue_struct) -> &'a Queue {
// SAFETY: The `Queue` type is `#[repr(transparent)]`, so the pointer cast is valid. The
// caller promises that the pointer is not dangling.
@@ -199,7 +198,11 @@ impl Queue {
// stay valid until we call the function pointer in the `work_struct`, so the access is ok.
unsafe {
w.__enqueue(move |work_ptr| {
- bindings::queue_work_on(bindings::WORK_CPU_UNBOUND as _, queue_ptr, work_ptr)
+ bindings::queue_work_on(
+ bindings::wq_misc_consts_WORK_CPU_UNBOUND as _,
+ queue_ptr,
+ work_ptr,
+ )
})
}
}
@@ -218,7 +221,9 @@ impl Queue {
}
}
-/// A helper type used in `try_spawn`.
+/// A helper type used in [`try_spawn`].
+///
+/// [`try_spawn`]: Queue::try_spawn
#[pin_data]
struct ClosureWork<T> {
#[pin]
@@ -253,14 +258,16 @@ impl<T: FnOnce()> WorkItem for ClosureWork<T> {
/// actual value of the id is not important as long as you use different ids for different fields
/// of the same struct. (Fields of different structs need not use different ids.)
///
-/// Note that the id is used only to select the right method to call during compilation. It wont be
+/// Note that the id is used only to select the right method to call during compilation. It won't be
/// part of the final executable.
///
/// # Safety
///
-/// Implementers must ensure that any pointers passed to a `queue_work_on` closure by `__enqueue`
+/// Implementers must ensure that any pointers passed to a `queue_work_on` closure by [`__enqueue`]
/// remain valid for the duration specified in the guarantees section of the documentation for
-/// `__enqueue`.
+/// [`__enqueue`].
+///
+/// [`__enqueue`]: RawWorkItem::__enqueue
pub unsafe trait RawWorkItem<const ID: u64> {
/// The return type of [`Queue::enqueue`].
type EnqueueOutput;
@@ -290,10 +297,11 @@ pub unsafe trait RawWorkItem<const ID: u64> {
/// Defines the method that should be called directly when a work item is executed.
///
-/// This trait is implemented by `Pin<Box<T>>` and `Arc<T>`, and is mainly intended to be
+/// This trait is implemented by `Pin<Box<T>>` and [`Arc<T>`], and is mainly intended to be
/// implemented for smart pointer types. For your own structs, you would implement [`WorkItem`]
-/// instead. The `run` method on this trait will usually just perform the appropriate
-/// `container_of` translation and then call into the `run` method from the [`WorkItem`] trait.
+/// instead. The [`run`] method on this trait will usually just perform the appropriate
+/// `container_of` translation and then call into the [`run`][WorkItem::run] method from the
+/// [`WorkItem`] trait.
///
/// This trait is used when the `work_struct` field is defined using the [`Work`] helper.
///
@@ -309,8 +317,10 @@ pub unsafe trait WorkItemPointer<const ID: u64>: RawWorkItem<ID> {
///
/// # Safety
///
- /// The provided `work_struct` pointer must originate from a previous call to `__enqueue` where
- /// the `queue_work_on` closure returned true, and the pointer must still be valid.
+ /// The provided `work_struct` pointer must originate from a previous call to [`__enqueue`]
+ /// where the `queue_work_on` closure returned true, and the pointer must still be valid.
+ ///
+ /// [`__enqueue`]: RawWorkItem::__enqueue
unsafe extern "C" fn run(ptr: *mut bindings::work_struct);
}
@@ -328,12 +338,14 @@ pub trait WorkItem<const ID: u64 = 0> {
/// Links for a work item.
///
-/// This struct contains a function pointer to the `run` function from the [`WorkItemPointer`]
+/// This struct contains a function pointer to the [`run`] function from the [`WorkItemPointer`]
/// trait, and defines the linked list pointers necessary to enqueue a work item in a workqueue.
///
/// Wraps the kernel's C `struct work_struct`.
///
/// This is a helper type used to associate a `work_struct` with the [`WorkItem`] that uses it.
+///
+/// [`run`]: WorkItemPointer::run
#[repr(transparent)]
pub struct Work<T: ?Sized, const ID: u64 = 0> {
work: Opaque<bindings::work_struct>,
@@ -396,9 +408,8 @@ impl<T: ?Sized, const ID: u64> Work<T, ID> {
/// like this:
///
/// ```no_run
-/// use kernel::impl_has_work;
/// use kernel::prelude::*;
-/// use kernel::workqueue::Work;
+/// use kernel::workqueue::{impl_has_work, Work};
///
/// struct MyWorkItem {
/// work_field: Work<MyWorkItem, 1>,
@@ -409,28 +420,25 @@ impl<T: ?Sized, const ID: u64> Work<T, ID> {
/// }
/// ```
///
-/// Note that since the `Work` type is annotated with an id, you can have several `work_struct`
+/// Note that since the [`Work`] type is annotated with an id, you can have several `work_struct`
/// fields by using a different id for each one.
///
/// # Safety
///
-/// The [`OFFSET`] constant must be the offset of a field in Self of type [`Work<T, ID>`]. The methods on
-/// this trait must have exactly the behavior that the definitions given below have.
+/// The [`OFFSET`] constant must be the offset of a field in `Self` of type [`Work<T, ID>`]. The
+/// methods on this trait must have exactly the behavior that the definitions given below have.
///
-/// [`Work<T, ID>`]: Work
/// [`impl_has_work!`]: crate::impl_has_work
/// [`OFFSET`]: HasWork::OFFSET
pub unsafe trait HasWork<T, const ID: u64 = 0> {
/// The offset of the [`Work<T, ID>`] field.
- ///
- /// [`Work<T, ID>`]: Work
const OFFSET: usize;
/// Returns the offset of the [`Work<T, ID>`] field.
///
- /// This method exists because the [`OFFSET`] constant cannot be accessed if the type is not Sized.
+ /// This method exists because the [`OFFSET`] constant cannot be accessed if the type is not
+ /// [`Sized`].
///
- /// [`Work<T, ID>`]: Work
/// [`OFFSET`]: HasWork::OFFSET
#[inline]
fn get_work_offset(&self) -> usize {
@@ -442,8 +450,6 @@ pub unsafe trait HasWork<T, const ID: u64 = 0> {
/// # Safety
///
/// The provided pointer must point at a valid struct of type `Self`.
- ///
- /// [`Work<T, ID>`]: Work
#[inline]
unsafe fn raw_get_work(ptr: *mut Self) -> *mut Work<T, ID> {
// SAFETY: The caller promises that the pointer is valid.
@@ -455,8 +461,6 @@ pub unsafe trait HasWork<T, const ID: u64 = 0> {
/// # Safety
///
/// The pointer must point at a [`Work<T, ID>`] field in a struct of type `Self`.
- ///
- /// [`Work<T, ID>`]: Work
#[inline]
unsafe fn work_container_of(ptr: *mut Work<T, ID>) -> *mut Self
where
@@ -473,9 +477,8 @@ pub unsafe trait HasWork<T, const ID: u64 = 0> {
/// # Examples
///
/// ```
-/// use kernel::impl_has_work;
/// use kernel::sync::Arc;
-/// use kernel::workqueue::{self, Work};
+/// use kernel::workqueue::{self, impl_has_work, Work};
///
/// struct MyStruct {
/// work_field: Work<MyStruct, 17>,
@@ -485,8 +488,6 @@ pub unsafe trait HasWork<T, const ID: u64 = 0> {
/// impl HasWork<MyStruct, 17> for MyStruct { self.work_field }
/// }
/// ```
-///
-/// [`HasWork<T, ID>`]: HasWork
#[macro_export]
macro_rules! impl_has_work {
($(impl$(<$($implarg:ident),*>)?
@@ -509,6 +510,7 @@ macro_rules! impl_has_work {
}
)*};
}
+pub use impl_has_work;
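
With `impl_has_work!`, `new_work!`, and the traits above all importable from this module, a complete work item reduces to a short sketch mirroring the module-level examples; `LogWork`, `schedule_log`, and the log message are illustrative:

```rust
use kernel::prelude::*;
use kernel::sync::Arc;
use kernel::workqueue::{self, impl_has_work, new_work, Work, WorkItem};

#[pin_data]
struct LogWork {
    #[pin]
    work: Work<LogWork>,
}

impl_has_work! {
    impl HasWork<Self> for LogWork { self.work }
}

impl WorkItem for LogWork {
    type Pointer = Arc<Self>;

    fn run(_this: Arc<Self>) {
        pr_info!("work item ran\n");
    }
}

fn schedule_log() -> Result {
    let item = Arc::pin_init(pin_init!(LogWork {
        work <- new_work!("LogWork::work"),
    }))?;
    // Enqueue on the system workqueue; `enqueue` hands the item back if it
    // was already queued.
    let _ = workqueue::system().enqueue(item);
    Ok(())
}
```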
impl_has_work! {
impl<T> HasWork<Self> for ClosureWork<T> { self.work }