author     Linus Torvalds <torvalds@linux-foundation.org>  2023-08-29 08:19:46 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-08-29 08:19:46 -0700
commit     a031fe8d1d32898582e36ccbffa9847d16f67aa2
tree       bb097e00fcf06c92efffe95e48163e5658d527fc
parent     f2586d921cea4feeddd1cc5ee3495700540dba8f
parent     4af84c6a85c63bec24611e46bb3de2c0a6602a51
Merge tag 'rust-6.6' of https://github.com/Rust-for-Linux/linux
Pull rust updates from Miguel Ojeda:
 "In terms of lines, most changes this time are on the pinned-init API
  and infrastructure. While we have a Rust version upgrade, and thus a
  bunch of changes from the vendored 'alloc' crate as usual, this time
  those do not account for many lines.

  Toolchain and infrastructure:

   - Upgrade to Rust 1.71.1. This is the second such upgrade, which is
     a smaller jump compared to the last time.

     This version allows us to remove the '__rust_*' allocator
     functions -- the compiler now generates them as expected, thus now
     our 'KernelAllocator' is used.

     It also introduces the 'offset_of!' macro in the standard library
     (as an unstable feature) which we will need soon. So far, we were
     using a declarative macro as a prerequisite in some not-yet-landed
     patch series, which did not support sub-fields (i.e. nested
     structs):

         #[repr(C)]
         struct S {
             a: u16,
             b: (u8, u8),
         }

         assert_eq!(offset_of!(S, b.1), 3);

   - Upgrade to bindgen 0.65.1. This is the first time we upgrade its
     version. Given it is a fairly big jump, it comes with a fair
     number of improvements/changes that affect us, such as a fix
     needed to support LLVM 16 as well as proper support for
     '__noreturn' C functions, which are now mapped to return the '!'
     type in Rust:

         void __noreturn f(void); // C
         pub fn f() -> !;         // Rust

   - 'scripts/rust_is_available.sh' improvements and fixes.

     This series takes care of all the issues known so far and adds a
     few new checks to cover for even more cases, plus adds some more
     help texts. All this together will hopefully make problematic
     setups easier to identify and to be solved by users building the
     kernel.

     In addition, it adds a test suite which covers all branches of the
     shell script, as well as tests for the issues found so far.

   - Support rust-analyzer for out-of-tree modules too.

   - Give 'cfg's to rust-analyzer for the 'core' and 'alloc' crates.

   - Drop 'scripts/is_rust_module.sh' since it is not needed anymore.

  Macros crate:

   - New 'paste!' proc macro.

     This macro is a more flexible version of 'concat_idents!': it
     allows the resulting identifier to be used to declare new items
     and it allows to transform the identifiers before concatenating
     them, e.g.

         let x_1 = 42;
         paste!(let [<x _2>] = [<x _1>];);
         assert!(x_1 == x_2);

     The macro is then used for several of the pinned-init API changes
     in this pull.

  Pinned-init API:

   - Make '#[pin_data]' compatible with conditional compilation of
     fields, allowing to write code like:

         #[pin_data]
         pub struct Foo {
             #[cfg(CONFIG_BAR)]
             a: Bar,
             #[cfg(not(CONFIG_BAR))]
             a: Baz,
         }

   - New '#[derive(Zeroable)]' proc macro for the 'Zeroable' trait,
     which allows 'unsafe' implementations for structs where every
     field implements the 'Zeroable' trait, e.g.:

         #[derive(Zeroable)]
         pub struct DriverData {
             id: i64,
             buf_ptr: *mut u8,
             len: usize,
         }

   - Add '..Zeroable::zeroed()' syntax to the 'pin_init!' macro for
     zeroing all other fields, e.g.:

         pin_init!(Buf {
             buf: [1; 64],
             ..Zeroable::zeroed()
         });

   - New '{,pin_}init_array_from_fn()' functions to create array
     initializers given a generator function, e.g.:

         let b: Box<[usize; 1_000]> = Box::init::<Error>(
             init_array_from_fn(|i| i)
         ).unwrap();

         assert_eq!(b.len(), 1_000);
         assert_eq!(b[123], 123);

   - New '{,pin_}chain' methods for '{,Pin}Init<T, E>' that allow to
     execute a closure on the value directly after initialization,
     e.g.:

         let foo = init!(Foo {
             buf <- init::zeroed()
         }).chain(|foo| {
             foo.setup();
             Ok(())
         });

   - Support arbitrary paths in init macros, instead of just
     identifiers and generic types.

   - Implement the 'Zeroable' trait for the 'UnsafeCell<T>' and
     'Opaque<T>' types.

   - Make initializer values inaccessible after initialization.

   - Make guards in the init macros hygienic.

  'allocator' module:

   - Use 'krealloc_aligned()' in 'KernelAllocator::alloc' preventing
     misaligned allocations when the Rust 1.71.1 upgrade is applied
     later in this pull.

     The equivalent fix for the previous compiler version (where
     'KernelAllocator' is not yet used) was merged into 6.5 already,
     which added the 'krealloc_aligned()' function used here.

   - Implement 'KernelAllocator::{realloc, alloc_zeroed}' for
     performance, using 'krealloc_aligned()' too, which forwards the
     call to the C API.

  'types' module:

   - Make 'Opaque' be '!Unpin', removing the need to add a
     'PhantomPinned' field to Rust structs that contain C structs which
     must not be moved.

   - Make 'Opaque' use 'UnsafeCell' as the outer type, rather than
     inner.

  Documentation:

   - Suggest obtaining the source code of the Rust's 'core' library
     using the tarball instead of the repository.

  MAINTAINERS:

   - Andreas and Alice, from Samsung and Google respectively, are
     joining as reviewers of the "RUST" entry.

  As well as a few other minor changes and cleanups"

* tag 'rust-6.6' of https://github.com/Rust-for-Linux/linux: (42 commits)
  rust: init: update expanded macro explanation
  rust: init: add `{pin_}chain` functions to `{Pin}Init<T, E>`
  rust: init: make `PinInit<T, E>` a supertrait of `Init<T, E>`
  rust: init: implement `Zeroable` for `UnsafeCell<T>` and `Opaque<T>`
  rust: init: add support for arbitrary paths in init macros
  rust: init: add functions to create array initializers
  rust: init: add `..Zeroable::zeroed()` syntax for zeroing all missing fields
  rust: init: make initializer values inaccessible after initializing
  rust: init: wrap type checking struct initializers in a closure
  rust: init: make guards in the init macros hygienic
  rust: add derive macro for `Zeroable`
  rust: init: make `#[pin_data]` compatible with conditional compilation of fields
  rust: init: consolidate init macros
  docs: rust: clarify what 'rustup override' does
  docs: rust: update instructions for obtaining 'core' source
  docs: rust: add command line to rust-analyzer section
  scripts: generate_rust_analyzer: provide `cfg`s for `core` and `alloc`
  rust: bindgen: upgrade to 0.65.1
  rust: enable `no_mangle_with_rust_abi` Clippy lint
  rust: upgrade to Rust 1.71.1
  ...
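
The pinned-init items above are designed to compose in a single initializer.
A minimal sketch, assuming the `kernel` crate APIs summarized in the message
(the `Buf` type and its fields follow the message's own example and are
illustrative):

    use core::convert::Infallible;
    use kernel::{init::PinInit, macros::{pin_data, Zeroable}, pin_init};

    #[pin_data]
    #[derive(Zeroable)]
    struct Buf {
        buf: [u8; 64],
        len: usize,
    }

    fn buf_init() -> impl PinInit<Buf, Infallible> {
        // `buf` is set explicitly; `..Zeroable::zeroed()` zero-initializes
        // `len` and any other field not mentioned in the body.
        pin_init!(Buf {
            buf: [1; 64],
            ..Zeroable::zeroed()
        })
    }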
Diffstat (limited to 'rust')
-rw-r--r--  rust/Makefile                    17
-rw-r--r--  rust/alloc/alloc.rs              20
-rw-r--r--  rust/alloc/boxed.rs             131
-rw-r--r--  rust/alloc/lib.rs                48
-rw-r--r--  rust/alloc/raw_vec.rs            18
-rw-r--r--  rust/alloc/slice.rs              43
-rw-r--r--  rust/alloc/vec/drain.rs           8
-rw-r--r--  rust/alloc/vec/drain_filter.rs    8
-rw-r--r--  rust/alloc/vec/into_iter.rs      35
-rw-r--r--  rust/alloc/vec/mod.rs            84
-rw-r--r--  rust/compiler_builtins.rs         7
-rw-r--r--  rust/helpers.c                   21
-rw-r--r--  rust/kernel/allocator.rs         84
-rw-r--r--  rust/kernel/init.rs             646
-rw-r--r--  rust/kernel/init/__internal.rs   39
-rw-r--r--  rust/kernel/init/macros.rs      519
-rw-r--r--  rust/kernel/lib.rs                3
-rw-r--r--  rust/kernel/prelude.rs            2
-rw-r--r--  rust/kernel/sync/lock.rs          6
-rw-r--r--  rust/kernel/types.rs             21
-rw-r--r--  rust/macros/lib.rs              117
-rw-r--r--  rust/macros/module.rs             2
-rw-r--r--  rust/macros/paste.rs             96
-rw-r--r--  rust/macros/quote.rs             12
-rw-r--r--  rust/macros/zeroable.rs          72
25 files changed, 1324 insertions, 735 deletions
diff --git a/rust/Makefile b/rust/Makefile
index 30b71af119a20a..0d4bb06c5ceee2 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -329,7 +329,7 @@ quiet_cmd_bindgen = BINDGEN $@
$(BINDGEN) $< $(bindgen_target_flags) \
--use-core --with-derive-default --ctypes-prefix core::ffi --no-layout-tests \
--no-debug '.*' \
- --size_t-is-usize -o $@ -- $(bindgen_c_flags_final) -DMODULE \
+ -o $@ -- $(bindgen_c_flags_final) -DMODULE \
$(bindgen_target_cflags) $(bindgen_target_extra)
$(obj)/bindings/bindings_generated.rs: private bindgen_target_flags = \
@@ -349,8 +349,8 @@ $(obj)/uapi/uapi_generated.rs: $(src)/uapi/uapi_helper.h \
# given it is `libclang`; but for consistency, future Clang changes and/or
# a potential future GCC backend for `bindgen`, we disable it too.
$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_flags = \
- --blacklist-type '.*' --whitelist-var '' \
- --whitelist-function 'rust_helper_.*'
+ --blocklist-type '.*' --allowlist-var '' \
+ --allowlist-function 'rust_helper_.*'
$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_cflags = \
-I$(objtree)/$(obj) -Wno-missing-prototypes -Wno-missing-declarations
$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_extra = ; \
@@ -402,12 +402,15 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L
$(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@)
rust-analyzer:
- $(Q)$(srctree)/scripts/generate_rust_analyzer.py $(srctree) $(objtree) \
- $(RUST_LIB_SRC) > $(objtree)/rust-project.json
+ $(Q)$(srctree)/scripts/generate_rust_analyzer.py \
+ --cfgs='core=$(core-cfgs)' --cfgs='alloc=$(alloc-cfgs)' \
+ $(abs_srctree) $(abs_objtree) \
+ $(RUST_LIB_SRC) $(KBUILD_EXTMOD) > \
+ $(if $(KBUILD_EXTMOD),$(extmod_prefix),$(objtree))/rust-project.json
redirect-intrinsics = \
- __eqsf2 __gesf2 __lesf2 __nesf2 __unordsf2 \
- __unorddf2 \
+ __addsf3 __eqsf2 __gesf2 __lesf2 __ltsf2 __mulsf3 __nesf2 __unordsf2 \
+ __adddf3 __ledf2 __ltdf2 __muldf3 __unorddf2 \
__muloti4 __multi3 \
__udivmodti4 __udivti3 __umodti3
diff --git a/rust/alloc/alloc.rs b/rust/alloc/alloc.rs
index acf22d45e6f26c..0b6bf5b6da4345 100644
--- a/rust/alloc/alloc.rs
+++ b/rust/alloc/alloc.rs
@@ -16,8 +16,6 @@ use core::ptr::{self, NonNull};
#[doc(inline)]
pub use core::alloc::*;
-use core::marker::Destruct;
-
#[cfg(test)]
mod tests;
@@ -41,6 +39,9 @@ extern "Rust" {
#[rustc_allocator_zeroed]
#[rustc_nounwind]
fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
+
+ #[cfg(not(bootstrap))]
+ static __rust_no_alloc_shim_is_unstable: u8;
}
/// The global memory allocator.
@@ -94,7 +95,14 @@ pub use std::alloc::Global;
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
- unsafe { __rust_alloc(layout.size(), layout.align()) }
+ unsafe {
+ // Make sure we don't accidentally allow omitting the allocator shim in
+ // stable code until it is actually stabilized.
+ #[cfg(not(bootstrap))]
+ core::ptr::read_volatile(&__rust_no_alloc_shim_is_unstable);
+
+ __rust_alloc(layout.size(), layout.align())
+ }
}
/// Deallocate memory with the global allocator.
@@ -333,16 +341,12 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
#[cfg_attr(not(test), lang = "box_free")]
#[inline]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
// This signature has to be the same as `Box`, otherwise an ICE will happen.
// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
// well.
// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
-pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Destruct>(
- ptr: Unique<T>,
- alloc: A,
-) {
+pub(crate) unsafe fn box_free<T: ?Sized, A: Allocator>(ptr: Unique<T>, alloc: A) {
unsafe {
let size = size_of_val(ptr.as_ref());
let align = min_align_of_val(ptr.as_ref());
diff --git a/rust/alloc/boxed.rs b/rust/alloc/boxed.rs
index 14af9860c36cd4..c8173cea831773 100644
--- a/rust/alloc/boxed.rs
+++ b/rust/alloc/boxed.rs
@@ -152,16 +152,13 @@ use core::any::Any;
use core::async_iter::AsyncIterator;
use core::borrow;
use core::cmp::Ordering;
-use core::convert::{From, TryFrom};
use core::error::Error;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
-#[cfg(not(no_global_oom_handling))]
-use core::iter::FromIterator;
-use core::iter::{FusedIterator, Iterator};
+use core::iter::FusedIterator;
use core::marker::Tuple;
-use core::marker::{Destruct, Unpin, Unsize};
+use core::marker::Unsize;
use core::mem;
use core::ops::{
CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
@@ -218,6 +215,7 @@ impl<T> Box<T> {
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
+ #[rustc_diagnostic_item = "box_new"]
pub fn new(x: T) -> Self {
#[rustc_box]
Box::new(x)
@@ -287,9 +285,7 @@ impl<T> Box<T> {
#[must_use]
#[inline(always)]
pub fn pin(x: T) -> Pin<Box<T>> {
- (#[rustc_box]
- Box::new(x))
- .into()
+ Box::new(x).into()
}
/// Allocates memory on the heap then places `x` into it,
@@ -381,12 +377,11 @@ impl<T, A: Allocator> Box<T, A> {
/// ```
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[must_use]
#[inline]
- pub const fn new_in(x: T, alloc: A) -> Self
+ pub fn new_in(x: T, alloc: A) -> Self
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let mut boxed = Self::new_uninit_in(alloc);
unsafe {
@@ -411,12 +406,10 @@ impl<T, A: Allocator> Box<T, A> {
/// # Ok::<(), std::alloc::AllocError>(())
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
+ pub fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
where
- T: ~const Destruct,
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let mut boxed = Self::try_new_uninit_in(alloc)?;
unsafe {
@@ -446,13 +439,12 @@ impl<T, A: Allocator> Box<T, A> {
/// assert_eq!(*five, 5)
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[cfg(not(no_global_oom_handling))]
#[must_use]
// #[unstable(feature = "new_uninit", issue = "63291")]
- pub const fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ pub fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
// NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
@@ -487,10 +479,9 @@ impl<T, A: Allocator> Box<T, A> {
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- pub const fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ pub fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
let ptr = alloc.allocate(layout)?.cast();
@@ -518,13 +509,12 @@ impl<T, A: Allocator> Box<T, A> {
///
/// [zeroed]: mem::MaybeUninit::zeroed
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[cfg(not(no_global_oom_handling))]
// #[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
- pub const fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ pub fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
// NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
@@ -559,10 +549,9 @@ impl<T, A: Allocator> Box<T, A> {
/// [zeroed]: mem::MaybeUninit::zeroed
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- pub const fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ pub fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
let ptr = alloc.allocate_zeroed(layout)?.cast();
@@ -578,12 +567,11 @@ impl<T, A: Allocator> Box<T, A> {
/// construct a (pinned) `Box` in a different way than with [`Box::new_in`].
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[must_use]
#[inline(always)]
- pub const fn pin_in(x: T, alloc: A) -> Pin<Self>
+ pub fn pin_in(x: T, alloc: A) -> Pin<Self>
where
- A: 'static + ~const Allocator + ~const Destruct,
+ A: 'static + Allocator,
{
Self::into_pin(Self::new_in(x, alloc))
}
@@ -592,8 +580,7 @@ impl<T, A: Allocator> Box<T, A> {
///
/// This conversion does not allocate on the heap and happens in place.
#[unstable(feature = "box_into_boxed_slice", issue = "71582")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- pub const fn into_boxed_slice(boxed: Self) -> Box<[T], A> {
+ pub fn into_boxed_slice(boxed: Self) -> Box<[T], A> {
let (raw, alloc) = Box::into_raw_with_allocator(boxed);
unsafe { Box::from_raw_in(raw as *mut [T; 1], alloc) }
}
@@ -610,12 +597,8 @@ impl<T, A: Allocator> Box<T, A> {
/// assert_eq!(Box::into_inner(c), 5);
/// ```
#[unstable(feature = "box_into_inner", issue = "80437")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn into_inner(boxed: Self) -> T
- where
- Self: ~const Destruct,
- {
+ pub fn into_inner(boxed: Self) -> T {
*boxed
}
}
@@ -829,9 +812,8 @@ impl<T, A: Allocator> Box<mem::MaybeUninit<T>, A> {
/// assert_eq!(*five, 5)
/// ```
#[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const unsafe fn assume_init(self) -> Box<T, A> {
+ pub unsafe fn assume_init(self) -> Box<T, A> {
let (raw, alloc) = Box::into_raw_with_allocator(self);
unsafe { Box::from_raw_in(raw as *mut T, alloc) }
}
@@ -864,9 +846,8 @@ impl<T, A: Allocator> Box<mem::MaybeUninit<T>, A> {
/// }
/// ```
#[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn write(mut boxed: Self, value: T) -> Box<T, A> {
+ pub fn write(mut boxed: Self, value: T) -> Box<T, A> {
unsafe {
(*boxed).write(value);
boxed.assume_init()
@@ -1110,9 +1091,8 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
///
/// [memory layout]: self#memory-layout
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn into_raw_with_allocator(b: Self) -> (*mut T, A) {
+ pub fn into_raw_with_allocator(b: Self) -> (*mut T, A) {
let (leaked, alloc) = Box::into_unique(b);
(leaked.as_ptr(), alloc)
}
@@ -1122,10 +1102,9 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
issue = "none",
reason = "use `Box::leak(b).into()` or `Unique::from(Box::leak(b))` instead"
)]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
#[doc(hidden)]
- pub const fn into_unique(b: Self) -> (Unique<T>, A) {
+ pub fn into_unique(b: Self) -> (Unique<T>, A) {
// Box is recognized as a "unique pointer" by Stacked Borrows, but internally it is a
// raw pointer for the type system. Turning it directly into a raw pointer would not be
// recognized as "releasing" the unique pointer to permit aliased raw accesses,
@@ -1183,9 +1162,8 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
/// assert_eq!(*static_ref, [4, 2, 3]);
/// ```
#[stable(feature = "box_leak", since = "1.26.0")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn leak<'a>(b: Self) -> &'a mut T
+ pub fn leak<'a>(b: Self) -> &'a mut T
where
A: 'a,
{
@@ -1246,16 +1224,16 @@ unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Box<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Box<T> {
/// Creates a `Box<T>`, with the `Default` value for T.
+ #[inline]
fn default() -> Self {
- #[rustc_box]
Box::new(T::default())
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
-impl<T> const Default for Box<[T]> {
+impl<T> Default for Box<[T]> {
+ #[inline]
fn default() -> Self {
let ptr: Unique<[T]> = Unique::<[T; 0]>::dangling();
Box(ptr, Global)
@@ -1264,8 +1242,8 @@ impl<T> const Default for Box<[T]> {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "default_box_extra", since = "1.17.0")]
-#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
-impl const Default for Box<str> {
+impl Default for Box<str> {
+ #[inline]
fn default() -> Self {
// SAFETY: This is the same as `Unique::cast<U>` but with an unsized `U = str`.
let ptr: Unique<str> = unsafe {
@@ -1461,8 +1439,7 @@ impl<T> From<T> for Box<T> {
}
#[stable(feature = "pin", since = "1.33.0")]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
-impl<T: ?Sized, A: Allocator> const From<Box<T, A>> for Pin<Box<T, A>>
+impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Pin<Box<T, A>>
where
A: 'static,
{
@@ -1482,9 +1459,36 @@ where
}
}
+/// Specialization trait used for `From<&[T]>`.
+#[cfg(not(no_global_oom_handling))]
+trait BoxFromSlice<T> {
+ fn from_slice(slice: &[T]) -> Self;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone> BoxFromSlice<T> for Box<[T]> {
+ #[inline]
+ default fn from_slice(slice: &[T]) -> Self {
+ slice.to_vec().into_boxed_slice()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Copy> BoxFromSlice<T> for Box<[T]> {
+ #[inline]
+ fn from_slice(slice: &[T]) -> Self {
+ let len = slice.len();
+ let buf = RawVec::with_capacity(len);
+ unsafe {
+ ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
+ buf.into_box(slice.len()).assume_init()
+ }
+ }
+}
+
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "box_from_slice", since = "1.17.0")]
-impl<T: Copy> From<&[T]> for Box<[T]> {
+impl<T: Clone> From<&[T]> for Box<[T]> {
/// Converts a `&[T]` into a `Box<[T]>`
///
/// This conversion allocates on the heap
@@ -1498,19 +1502,15 @@ impl<T: Copy> From<&[T]> for Box<[T]> {
///
/// println!("{boxed_slice:?}");
/// ```
+ #[inline]
fn from(slice: &[T]) -> Box<[T]> {
- let len = slice.len();
- let buf = RawVec::with_capacity(len);
- unsafe {
- ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
- buf.into_box(slice.len()).assume_init()
- }
+ <Self as BoxFromSlice<T>>::from_slice(slice)
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "box_from_cow", since = "1.45.0")]
-impl<T: Copy> From<Cow<'_, [T]>> for Box<[T]> {
+impl<T: Clone> From<Cow<'_, [T]>> for Box<[T]> {
/// Converts a `Cow<'_, [T]>` into a `Box<[T]>`
///
/// When `cow` is the `Cow::Borrowed` variant, this
@@ -1620,7 +1620,6 @@ impl<T, const N: usize> From<[T; N]> for Box<[T]> {
/// println!("{boxed:?}");
/// ```
fn from(array: [T; N]) -> Box<[T]> {
- #[rustc_box]
Box::new(array)
}
}
@@ -1899,8 +1898,7 @@ impl<T: ?Sized, A: Allocator> fmt::Pointer for Box<T, A> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
-impl<T: ?Sized, A: Allocator> const Deref for Box<T, A> {
+impl<T: ?Sized, A: Allocator> Deref for Box<T, A> {
type Target = T;
fn deref(&self) -> &T {
@@ -1909,8 +1907,7 @@ impl<T: ?Sized, A: Allocator> const Deref for Box<T, A> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
-impl<T: ?Sized, A: Allocator> const DerefMut for Box<T, A> {
+impl<T: ?Sized, A: Allocator> DerefMut for Box<T, A> {
fn deref_mut(&mut self) -> &mut T {
&mut **self
}
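
The boxed.rs hunks above relax `From<&[T]>` and `From<Cow<'_, [T]>>` from
`T: Copy` to `T: Clone`, keeping the old memcpy path as a `Copy`
specialization (`BoxFromSlice`). The effect is visible in plain user code; a
minimal sketch, assuming a compiler carrying these impls (Rust 1.71+):

    fn main() {
        // `String` is `Clone` but not `Copy`; under the old `T: Copy` bound
        // this conversion did not exist.
        let words: Box<[String]> = Box::from(&["a".to_string(), "b".to_string()][..]);
        assert_eq!(words.len(), 2);
    }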
diff --git a/rust/alloc/lib.rs b/rust/alloc/lib.rs
index 5f374378b0d494..85e91356ecb308 100644
--- a/rust/alloc/lib.rs
+++ b/rust/alloc/lib.rs
@@ -89,35 +89,37 @@
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
+#![warn(multiple_supertrait_upcastable)]
//
// Library features:
+// tidy-alphabetical-start
+#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
+#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
+#![cfg_attr(test, feature(is_sorted))]
+#![cfg_attr(test, feature(new_uninit))]
#![feature(alloc_layout_extra)]
#![feature(allocator_api)]
#![feature(array_chunks)]
#![feature(array_into_iter_constructors)]
#![feature(array_methods)]
#![feature(array_windows)]
+#![feature(ascii_char)]
#![feature(assert_matches)]
#![feature(async_iterator)]
#![feature(coerce_unsized)]
-#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
+#![feature(const_align_of_val)]
#![feature(const_box)]
-#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
#![cfg_attr(not(no_borrow), feature(const_cow_is_borrowed))]
-#![feature(const_convert)]
-#![feature(const_size_of_val)]
-#![feature(const_align_of_val)]
-#![feature(const_ptr_read)]
-#![feature(const_maybe_uninit_zeroed)]
-#![feature(const_maybe_uninit_write)]
+#![feature(const_eval_select)]
#![feature(const_maybe_uninit_as_mut_ptr)]
+#![feature(const_maybe_uninit_write)]
+#![feature(const_maybe_uninit_zeroed)]
+#![feature(const_pin)]
#![feature(const_refs_to_cell)]
+#![feature(const_size_of_val)]
+#![feature(const_waker)]
#![feature(core_intrinsics)]
#![feature(core_panic)]
-#![feature(const_eval_select)]
-#![feature(const_pin)]
-#![feature(const_waker)]
-#![feature(cstr_from_bytes_until_nul)]
#![feature(dispatch_from_dyn)]
#![feature(error_generic_member_access)]
#![feature(error_in_core)]
@@ -128,7 +130,6 @@
#![feature(hasher_prefixfree_extras)]
#![feature(inline_const)]
#![feature(inplace_iteration)]
-#![cfg_attr(test, feature(is_sorted))]
#![feature(iter_advance_by)]
#![feature(iter_next_chunk)]
#![feature(iter_repeat_n)]
@@ -136,8 +137,6 @@
#![feature(maybe_uninit_slice)]
#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_uninit_array_transpose)]
-#![cfg_attr(test, feature(new_uninit))]
-#![feature(nonnull_slice_from_raw_parts)]
#![feature(pattern)]
#![feature(pointer_byte_offsets)]
#![feature(provide_any)]
@@ -153,6 +152,7 @@
#![feature(slice_ptr_get)]
#![feature(slice_ptr_len)]
#![feature(slice_range)]
+#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
#![feature(trusted_len)]
@@ -163,40 +163,42 @@
#![feature(unicode_internals)]
#![feature(unsize)]
#![feature(utf8_chunks)]
-#![feature(std_internals)]
+// tidy-alphabetical-end
//
// Language features:
+// tidy-alphabetical-start
+#![cfg_attr(not(test), feature(generator_trait))]
+#![cfg_attr(test, feature(panic_update_hook))]
+#![cfg_attr(test, feature(test))]
#![feature(allocator_internals)]
#![feature(allow_internal_unstable)]
#![feature(associated_type_bounds)]
+#![feature(c_unwind)]
#![feature(cfg_sanitize)]
-#![feature(const_deref)]
#![feature(const_mut_refs)]
-#![feature(const_ptr_write)]
#![feature(const_precise_live_drops)]
+#![feature(const_ptr_write)]
#![feature(const_trait_impl)]
#![feature(const_try)]
#![feature(dropck_eyepatch)]
#![feature(exclusive_range_pattern)]
#![feature(fundamental)]
-#![cfg_attr(not(test), feature(generator_trait))]
#![feature(hashmap_internals)]
#![feature(lang_items)]
#![feature(min_specialization)]
+#![feature(multiple_supertrait_upcastable)]
#![feature(negative_impls)]
#![feature(never_type)]
+#![feature(pointer_is_aligned)]
#![feature(rustc_allow_const_fn_unstable)]
#![feature(rustc_attrs)]
-#![feature(pointer_is_aligned)]
#![feature(slice_internals)]
#![feature(staged_api)]
#![feature(stmt_expr_attributes)]
-#![cfg_attr(test, feature(test))]
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
-#![feature(c_unwind)]
#![feature(with_negative_coherence)]
-#![cfg_attr(test, feature(panic_update_hook))]
+// tidy-alphabetical-end
//
// Rustdoc features:
#![feature(doc_cfg)]
diff --git a/rust/alloc/raw_vec.rs b/rust/alloc/raw_vec.rs
index 5db87eac53b782..65d5ce15828e43 100644
--- a/rust/alloc/raw_vec.rs
+++ b/rust/alloc/raw_vec.rs
@@ -6,7 +6,6 @@ use core::alloc::LayoutError;
use core::cmp;
use core::intrinsics;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
-use core::ops::Drop;
use core::ptr::{self, NonNull, Unique};
use core::slice;
@@ -274,10 +273,15 @@ impl<T, A: Allocator> RawVec<T, A> {
if T::IS_ZST || self.cap == 0 {
None
} else {
- // We have an allocated chunk of memory, so we can bypass runtime
- // checks to get our current layout.
+ // We could use Layout::array here which ensures the absence of isize and usize overflows
+ // and could hypothetically handle differences between stride and size, but this memory
+ // has already been allocated so we know it can't overflow and currently rust does not
+ // support such types. So we can do better by skipping some checks and avoid an unwrap.
+ let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
unsafe {
- let layout = Layout::array::<T>(self.cap).unwrap_unchecked();
+ let align = mem::align_of::<T>();
+ let size = mem::size_of::<T>().unchecked_mul(self.cap);
+ let layout = Layout::from_size_align_unchecked(size, align);
Some((self.ptr.cast().into(), layout))
}
}
@@ -465,11 +469,13 @@ impl<T, A: Allocator> RawVec<T, A> {
assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity");
let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
-
+ // See current_memory() why this assert is here
+ let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
let ptr = unsafe {
// `Layout::array` cannot overflow here because it would have
// overflowed earlier when capacity was larger.
- let new_layout = Layout::array::<T>(cap).unwrap_unchecked();
+ let new_size = mem::size_of::<T>().unchecked_mul(cap);
+ let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
self.alloc
.shrink(ptr, layout, new_layout)
.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
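
The raw_vec.rs hunks above replace `Layout::array::<T>(cap)` with a manual
size/align computation, guarded by a compile-time assertion that the size is
a multiple of the alignment. A standalone sketch of the equivalence being
relied on (plain stable Rust, not kernel code):

    use core::alloc::Layout;
    use core::mem;

    // For today's Rust types stride == size, so multiplying the size by the
    // capacity, with the type's own alignment, reproduces `Layout::array`.
    fn manual_array_layout<T>(cap: usize) -> Layout {
        let size = mem::size_of::<T>().checked_mul(cap).unwrap();
        Layout::from_size_align(size, mem::align_of::<T>()).unwrap()
    }

    fn main() {
        assert_eq!(manual_array_layout::<u64>(10), Layout::array::<u64>(10).unwrap());
    }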
diff --git a/rust/alloc/slice.rs b/rust/alloc/slice.rs
index 245e01590df72e..6ac463bd3edc53 100644
--- a/rust/alloc/slice.rs
+++ b/rust/alloc/slice.rs
@@ -784,6 +784,38 @@ impl<T, A: Allocator> BorrowMut<[T]> for Vec<T, A> {
}
}
+// Specializable trait for implementing ToOwned::clone_into. This is
+// public in the crate and has the Allocator parameter so that
+// vec::clone_from use it too.
+#[cfg(not(no_global_oom_handling))]
+pub(crate) trait SpecCloneIntoVec<T, A: Allocator> {
+ fn clone_into(&self, target: &mut Vec<T, A>);
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone, A: Allocator> SpecCloneIntoVec<T, A> for [T] {
+ default fn clone_into(&self, target: &mut Vec<T, A>) {
+ // drop anything in target that will not be overwritten
+ target.truncate(self.len());
+
+ // target.len <= self.len due to the truncate above, so the
+ // slices here are always in-bounds.
+ let (init, tail) = self.split_at(target.len());
+
+ // reuse the contained values' allocations/resources.
+ target.clone_from_slice(init);
+ target.extend_from_slice(tail);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Copy, A: Allocator> SpecCloneIntoVec<T, A> for [T] {
+ fn clone_into(&self, target: &mut Vec<T, A>) {
+ target.clear();
+ target.extend_from_slice(self);
+ }
+}
+
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> ToOwned for [T] {
@@ -799,16 +831,7 @@ impl<T: Clone> ToOwned for [T] {
}
fn clone_into(&self, target: &mut Vec<T>) {
- // drop anything in target that will not be overwritten
- target.truncate(self.len());
-
- // target.len <= self.len due to the truncate above, so the
- // slices here are always in-bounds.
- let (init, tail) = self.split_at(target.len());
-
- // reuse the contained values' allocations/resources.
- target.clone_from_slice(init);
- target.extend_from_slice(tail);
+ SpecCloneIntoVec::clone_into(self, target);
}
}
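
The slice.rs change moves the body of `ToOwned::clone_into` into a
crate-public `SpecCloneIntoVec` trait so that `Vec::clone_from` (see the
vec/mod.rs diff below) can share it. The observable contract is unchanged; a
minimal usage sketch:

    fn main() {
        let src = [1, 2, 3];
        let mut target: Vec<i32> = Vec::with_capacity(16);
        // `clone_into` reuses `target`'s existing allocation where possible;
        // for `Copy` elements the specialized impl is a clear + memcpy-extend.
        src.clone_into(&mut target);
        assert_eq!(target, [1, 2, 3]);
    }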
diff --git a/rust/alloc/vec/drain.rs b/rust/alloc/vec/drain.rs
index d503d2f478ce7a..78177a9e2ad0c2 100644
--- a/rust/alloc/vec/drain.rs
+++ b/rust/alloc/vec/drain.rs
@@ -18,7 +18,7 @@ use super::Vec;
///
/// ```
/// let mut v = vec![0, 1, 2];
-/// let iter: std::vec::Drain<_> = v.drain(..);
+/// let iter: std::vec::Drain<'_, _> = v.drain(..);
/// ```
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<
@@ -114,9 +114,7 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
let unyielded_ptr = this.iter.as_slice().as_ptr();
// ZSTs have no identity, so we don't need to move them around.
- let needs_move = mem::size_of::<T>() != 0;
-
- if needs_move {
+ if !T::IS_ZST {
let start_ptr = source_vec.as_mut_ptr().add(start);
// memmove back unyielded elements
@@ -199,7 +197,7 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
}
}
- let iter = mem::replace(&mut self.iter, (&mut []).iter());
+ let iter = mem::take(&mut self.iter);
let drop_len = iter.len();
let mut vec = self.vec;
diff --git a/rust/alloc/vec/drain_filter.rs b/rust/alloc/vec/drain_filter.rs
index 4b019220657dfc..09efff090e428c 100644
--- a/rust/alloc/vec/drain_filter.rs
+++ b/rust/alloc/vec/drain_filter.rs
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
use crate::alloc::{Allocator, Global};
-use core::mem::{self, ManuallyDrop};
+use core::mem::{ManuallyDrop, SizedTypeProperties};
use core::ptr;
use core::slice;
@@ -18,7 +18,7 @@ use super::Vec;
/// #![feature(drain_filter)]
///
/// let mut v = vec![0, 1, 2];
-/// let iter: std::vec::DrainFilter<_, _> = v.drain_filter(|x| *x % 2 == 0);
+/// let iter: std::vec::DrainFilter<'_, _, _> = v.drain_filter(|x| *x % 2 == 0);
/// ```
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
#[derive(Debug)]
@@ -98,9 +98,7 @@ where
unsafe {
// ZSTs have no identity, so we don't need to move them around.
- let needs_move = mem::size_of::<T>() != 0;
-
- if needs_move && this.idx < this.old_len && this.del > 0 {
+ if !T::IS_ZST && this.idx < this.old_len && this.del > 0 {
let ptr = this.vec.as_mut_ptr();
let src = ptr.add(this.idx);
let dst = src.sub(this.del);
diff --git a/rust/alloc/vec/into_iter.rs b/rust/alloc/vec/into_iter.rs
index 34a2a70d6ded3e..aac0ec16aef15d 100644
--- a/rust/alloc/vec/into_iter.rs
+++ b/rust/alloc/vec/into_iter.rs
@@ -13,6 +13,7 @@ use core::iter::{
};
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
+use core::num::NonZeroUsize;
#[cfg(not(no_global_oom_handling))]
use core::ops::Deref;
use core::ptr::{self, NonNull};
@@ -109,7 +110,7 @@ impl<T, A: Allocator> IntoIter<T, A> {
/// ```
/// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
/// let mut into_iter = std::mem::replace(&mut into_iter, Vec::new().into_iter());
- /// (&mut into_iter).for_each(core::mem::drop);
+ /// (&mut into_iter).for_each(drop);
/// std::mem::forget(into_iter);
/// ```
///
@@ -215,7 +216,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let step_size = self.len().min(n);
let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
if T::IS_ZST {
@@ -229,10 +230,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
unsafe {
ptr::drop_in_place(to_drop);
}
- if step_size < n {
- return Err(step_size);
- }
- Ok(())
+ NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
}
#[inline]
@@ -315,7 +313,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let step_size = self.len().min(n);
if T::IS_ZST {
// SAFETY: same as for advance_by()
@@ -329,10 +327,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
unsafe {
ptr::drop_in_place(to_drop);
}
- if step_size < n {
- return Err(step_size);
- }
- Ok(())
+ NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
}
}
@@ -349,6 +344,24 @@ impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T, A> Default for IntoIter<T, A>
+where
+ A: Allocator + Default,
+{
+ /// Creates an empty `vec::IntoIter`.
+ ///
+ /// ```
+ /// # use std::vec;
+ /// let iter: vec::IntoIter<u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// assert_eq!(iter.as_slice(), &[]);
+ /// ```
+ fn default() -> Self {
+ super::Vec::new_in(Default::default()).into_iter()
+ }
+}
+
#[doc(hidden)]
#[unstable(issue = "none", feature = "std_internals")]
#[rustc_unsafe_specialization_marker]
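
The `advance_by`/`advance_back_by` hunks above track an upstream signature
change: on a partial advance the methods now return the number of *missing*
steps as `Err(NonZeroUsize)` rather than the old `Err(steps_taken)`. A
nightly-only sketch (the `iter_advance_by` feature is unstable):

    #![feature(iter_advance_by)]

    fn main() {
        let mut it = vec![1, 2, 3].into_iter();
        assert!(it.advance_by(2).is_ok());
        // Only 1 element was left, so 4 of the requested 5 steps are missing.
        assert_eq!(it.advance_by(5).unwrap_err().get(), 4);
    }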
diff --git a/rust/alloc/vec/mod.rs b/rust/alloc/vec/mod.rs
index 94995913566bb4..05c70de0227ed3 100644
--- a/rust/alloc/vec/mod.rs
+++ b/rust/alloc/vec/mod.rs
@@ -58,13 +58,9 @@
#[cfg(not(no_global_oom_handling))]
use core::cmp;
use core::cmp::Ordering;
-use core::convert::TryFrom;
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::intrinsics::assume;
use core::iter;
-#[cfg(not(no_global_oom_handling))]
-use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ops::{self, Index, IndexMut, Range, RangeBounds};
@@ -381,8 +377,8 @@ mod spec_extend;
/// Currently, `Vec` does not guarantee the order in which elements are dropped.
/// The order has changed in the past and may change again.
///
-/// [`get`]: ../../std/vec/struct.Vec.html#method.get
-/// [`get_mut`]: ../../std/vec/struct.Vec.html#method.get_mut
+/// [`get`]: slice::get
+/// [`get_mut`]: slice::get_mut
/// [`String`]: crate::string::String
/// [`&str`]: type@str
/// [`shrink_to_fit`]: Vec::shrink_to_fit
@@ -708,14 +704,14 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// // The vector contains no items, even though it has capacity for more
/// assert_eq!(vec.len(), 0);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // These are all done without reallocating...
/// for i in 0..10 {
/// vec.push(i);
/// }
/// assert_eq!(vec.len(), 10);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // ...but this may make the vector reallocate
/// vec.push(11);
@@ -766,14 +762,14 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// // The vector contains no items, even though it has capacity for more
/// assert_eq!(vec.len(), 0);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // These are all done without reallocating...
/// for i in 0..10 {
/// vec.push(i);
/// }
/// assert_eq!(vec.len(), 10);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // ...but this may make the vector reallocate
/// vec.push(11);
@@ -999,7 +995,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
/// let mut vec: Vec<i32> = Vec::with_capacity(10);
/// vec.push(42);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -1150,7 +1146,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
/// let mut vec = Vec::with_capacity(10);
/// vec.extend([1, 2, 3]);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
/// vec.shrink_to_fit();
/// assert!(vec.capacity() >= 3);
/// ```
@@ -1177,7 +1173,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
/// let mut vec = Vec::with_capacity(10);
/// vec.extend([1, 2, 3]);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
/// vec.shrink_to(4);
/// assert!(vec.capacity() >= 4);
/// vec.shrink_to(0);
@@ -1212,7 +1208,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// let mut vec = Vec::with_capacity(10);
/// vec.extend([1, 2, 3]);
///
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
/// let slice = vec.into_boxed_slice();
/// assert_eq!(slice.into_vec().capacity(), 3);
/// ```
@@ -1358,11 +1354,7 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn as_ptr(&self) -> *const T {
// We shadow the slice method of the same name to avoid going through
// `deref`, which creates an intermediate reference.
- let ptr = self.buf.ptr();
- unsafe {
- assume(!ptr.is_null());
- }
- ptr
+ self.buf.ptr()
}
/// Returns an unsafe mutable pointer to the vector's buffer, or a dangling
@@ -1395,11 +1387,7 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn as_mut_ptr(&mut self) -> *mut T {
// We shadow the slice method of the same name to avoid going through
// `deref_mut`, which creates an intermediate reference.
- let ptr = self.buf.ptr();
- unsafe {
- assume(!ptr.is_null());
- }
- ptr
+ self.buf.ptr()
}
/// Returns a reference to the underlying allocator.
@@ -2892,35 +2880,6 @@ impl<T, A: Allocator> ops::DerefMut for Vec<T, A> {
}
#[cfg(not(no_global_oom_handling))]
-trait SpecCloneFrom {
- fn clone_from(this: &mut Self, other: &Self);
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Clone, A: Allocator> SpecCloneFrom for Vec<T, A> {
- default fn clone_from(this: &mut Self, other: &Self) {
- // drop anything that will not be overwritten
- this.truncate(other.len());
-
- // self.len <= other.len due to the truncate above, so the
- // slices here are always in-bounds.
- let (init, tail) = other.split_at(this.len());
-
- // reuse the contained values' allocations/resources.
- this.clone_from_slice(init);
- this.extend_from_slice(tail);
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Copy, A: Allocator> SpecCloneFrom for Vec<T, A> {
- fn clone_from(this: &mut Self, other: &Self) {
- this.clear();
- this.extend_from_slice(other);
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
#[cfg(not(test))]
@@ -2940,7 +2899,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
}
fn clone_from(&mut self, other: &Self) {
- SpecCloneFrom::clone_from(self, other)
+ crate::slice::SpecCloneIntoVec::clone_into(other.as_slice(), self);
}
}
@@ -2948,7 +2907,6 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
/// as required by the `core::borrow::Borrow` implementation.
///
/// ```
-/// #![feature(build_hasher_simple_hash_one)]
/// use std::hash::BuildHasher;
///
/// let b = std::collections::hash_map::RandomState::new();
@@ -3330,7 +3288,7 @@ impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
}
}
-/// Implements comparison of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+/// Implements comparison of vectors, [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
#[inline]
@@ -3342,7 +3300,7 @@ impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq, A: Allocator> Eq for Vec<T, A> {}
-/// Implements ordering of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+/// Implements ordering of vectors, [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord, A: Allocator> Ord for Vec<T, A> {
#[inline]
@@ -3365,8 +3323,7 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for Vec<T, A> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
-impl<T> const Default for Vec<T> {
+impl<T> Default for Vec<T> {
/// Creates an empty `Vec<T>`.
///
/// The vector will not allocate until elements are pushed onto it.
@@ -3462,10 +3419,7 @@ impl<T, const N: usize> From<[T; N]> for Vec<T> {
/// ```
#[cfg(not(test))]
fn from(s: [T; N]) -> Vec<T> {
- <[T]>::into_vec(
- #[rustc_box]
- Box::new(s),
- )
+ <[T]>::into_vec(Box::new(s))
}
#[cfg(test)]
@@ -3490,8 +3444,8 @@ where
///
/// ```
/// # use std::borrow::Cow;
- /// let o: Cow<[i32]> = Cow::Owned(vec![1, 2, 3]);
- /// let b: Cow<[i32]> = Cow::Borrowed(&[1, 2, 3]);
+ /// let o: Cow<'_, [i32]> = Cow::Owned(vec![1, 2, 3]);
+ /// let b: Cow<'_, [i32]> = Cow::Borrowed(&[1, 2, 3]);
/// assert_eq!(Vec::from(o), Vec::from(b));
/// ```
fn from(s: Cow<'a, [T]>) -> Vec<T> {
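
Several vec/mod.rs doc hunks above change `assert_eq!(vec.capacity(), 10)`
to `assert!(vec.capacity() >= 10)`: `with_capacity` only guarantees a
minimum, and an allocator may round the request up. A tiny sketch of the
guaranteed form:

    fn main() {
        let vec: Vec<u8> = Vec::with_capacity(10);
        // Exactly 10 is not guaranteed; at least 10 is.
        assert!(vec.capacity() >= 10);
    }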
diff --git a/rust/compiler_builtins.rs b/rust/compiler_builtins.rs
index 43378357ece99b..fb8ac3f211de52 100644
--- a/rust/compiler_builtins.rs
+++ b/rust/compiler_builtins.rs
@@ -37,14 +37,21 @@ macro_rules! define_panicking_intrinsics(
);
define_panicking_intrinsics!("`f32` should not be used", {
+ __addsf3,
__eqsf2,
__gesf2,
__lesf2,
+ __ltsf2,
+ __mulsf3,
__nesf2,
__unordsf2,
});
define_panicking_intrinsics!("`f64` should not be used", {
+ __adddf3,
+ __ledf2,
+ __ltdf2,
+ __muldf3,
__unorddf2,
});
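
The new `__addsf3`, `__mulsf3`, `__adddf3`, etc. entries are soft-float
intrinsics; `define_panicking_intrinsics!` turns each listed name into a
stub that panics if it is ever reached, since floating point must not be
used in the kernel. A hand-expansion of one entry, illustrative only (the
exact attributes live in the macro body, which this diff does not show):

    // Illustrative hand-expansion of one entry from the `f32` list above.
    #[no_mangle]
    pub extern "C" fn __addsf3() {
        panic!("`f32` should not be used");
    }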
diff --git a/rust/helpers.c b/rust/helpers.c
index 49a5e1a4f0ae92..4c86fe4a7e05e7 100644
--- a/rust/helpers.c
+++ b/rust/helpers.c
@@ -16,6 +16,8 @@
*
* All symbols are exported as GPL-only to guarantee no GPL-only feature is
* accidentally exposed.
+ *
+ * Sorted alphabetically.
*/
#include <kunit/test-bug.h>
@@ -23,10 +25,10 @@
#include <linux/build_bug.h>
#include <linux/err.h>
#include <linux/errname.h>
-#include <linux/refcount.h>
#include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/refcount.h>
#include <linux/sched/signal.h>
+#include <linux/spinlock.h>
#include <linux/wait.h>
__noreturn void rust_helper_BUG(void)
@@ -143,19 +145,18 @@ struct kunit *rust_helper_kunit_get_current_test(void)
EXPORT_SYMBOL_GPL(rust_helper_kunit_get_current_test);
/*
- * We use `bindgen`'s `--size_t-is-usize` option to bind the C `size_t` type
- * as the Rust `usize` type, so we can use it in contexts where Rust
- * expects a `usize` like slice (array) indices. `usize` is defined to be
- * the same as C's `uintptr_t` type (can hold any pointer) but not
- * necessarily the same as `size_t` (can hold the size of any single
- * object). Most modern platforms use the same concrete integer type for
+ * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
+ * use it in contexts where Rust expects a `usize` like slice (array) indices.
+ * `usize` is defined to be the same as C's `uintptr_t` type (can hold any
+ * pointer) but not necessarily the same as `size_t` (can hold the size of any
+ * single object). Most modern platforms use the same concrete integer type for
* both of them, but in case we find ourselves on a platform where
* that's not true, fail early instead of risking ABI or
* integer-overflow issues.
*
* If your platform fails this assertion, it means that you are in
- * danger of integer-overflow bugs (even if you attempt to remove
- * `--size_t-is-usize`). It may be easiest to change the kernel ABI on
+ * danger of integer-overflow bugs (even if you attempt to add
+ * `--no-size_t-is-usize`). It may be easiest to change the kernel ABI on
* your platform such that `size_t` matches `uintptr_t` (i.e., to increase
* `size_t`, because `uintptr_t` has to be at least as big as `size_t`).
*/
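
On the Rust side these out-of-line wrappers surface as ordinary `extern`
bindings generated by `bindgen`. Per the commit message, with bindgen 0.65.1
a `__noreturn` helper such as `rust_helper_BUG()` above now binds with the
`!` return type; a sketch of the generated shape (bindgen output, not
hand-written kernel code):

    extern "C" {
        // What bindgen 0.65.1 emits for `__noreturn void rust_helper_BUG(void)`.
        pub fn rust_helper_BUG() -> !;
    }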
diff --git a/rust/kernel/allocator.rs b/rust/kernel/allocator.rs
index 9363b527be6645..a8f3d5be1af1e7 100644
--- a/rust/kernel/allocator.rs
+++ b/rust/kernel/allocator.rs
@@ -41,9 +41,9 @@ unsafe fn krealloc_aligned(ptr: *mut u8, new_layout: Layout, flags: bindings::gf
unsafe impl GlobalAlloc for KernelAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
- // `krealloc()` is used instead of `kmalloc()` because the latter is
- // an inline function and cannot be bound to as a result.
- unsafe { bindings::krealloc(ptr::null(), layout.size(), bindings::GFP_KERNEL) as *mut u8 }
+ // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety
+ // requirement.
+ unsafe { krealloc_aligned(ptr::null_mut(), layout, bindings::GFP_KERNEL) }
}
unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
@@ -51,58 +51,38 @@ unsafe impl GlobalAlloc for KernelAllocator {
bindings::kfree(ptr as *const core::ffi::c_void);
}
}
-}
-
-#[global_allocator]
-static ALLOCATOR: KernelAllocator = KernelAllocator;
-
-// `rustc` only generates these for some crate types. Even then, we would need
-// to extract the object file that has them from the archive. For the moment,
-// let's generate them ourselves instead.
-//
-// Note: Although these are *safe* functions, they are called by the compiler
-// with parameters that obey the same `GlobalAlloc` function safety
-// requirements: size and align should form a valid layout, and size is
-// greater than 0.
-//
-// Note that `#[no_mangle]` implies exported too, nowadays.
-#[no_mangle]
-fn __rust_alloc(size: usize, align: usize) -> *mut u8 {
- // SAFETY: See assumption above.
- let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
- // SAFETY: `ptr::null_mut()` is null, per assumption above the size of `layout` is greater
- // than 0.
- unsafe { krealloc_aligned(ptr::null_mut(), layout, bindings::GFP_KERNEL) }
-}
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ // SAFETY:
+ // - `new_size`, when rounded up to the nearest multiple of `layout.align()`, will not
+ // overflow `isize` by the function safety requirement.
+ // - `layout.align()` is a proper alignment (i.e. not zero and must be a power of two).
+ let layout = unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
+
+ // SAFETY:
+ // - `ptr` is either null or a pointer allocated by this allocator by the function safety
+ // requirement.
+ // - the size of `layout` is not zero because `new_size` is not zero by the function safety
+ // requirement.
+ unsafe { krealloc_aligned(ptr, layout, bindings::GFP_KERNEL) }
+ }
-#[no_mangle]
-fn __rust_dealloc(ptr: *mut u8, _size: usize, _align: usize) {
- unsafe { bindings::kfree(ptr as *const core::ffi::c_void) };
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety
+ // requirement.
+ unsafe {
+ krealloc_aligned(
+ ptr::null_mut(),
+ layout,
+ bindings::GFP_KERNEL | bindings::__GFP_ZERO,
+ )
+ }
+ }
}
-#[no_mangle]
-fn __rust_realloc(ptr: *mut u8, _old_size: usize, align: usize, new_size: usize) -> *mut u8 {
- // SAFETY: See assumption above.
- let new_layout = unsafe { Layout::from_size_align_unchecked(new_size, align) };
-
- // SAFETY: Per assumption above, `ptr` is allocated by `__rust_*` before, and the size of
- // `new_layout` is greater than 0.
- unsafe { krealloc_aligned(ptr, new_layout, bindings::GFP_KERNEL) }
-}
+#[global_allocator]
+static ALLOCATOR: KernelAllocator = KernelAllocator;
+// See <https://github.com/rust-lang/rust/pull/86844>.
#[no_mangle]
-fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8 {
- // SAFETY: See assumption above.
- let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
-
- // SAFETY: `ptr::null_mut()` is null, per assumption above the size of `layout` is greater
- // than 0.
- unsafe {
- krealloc_aligned(
- ptr::null_mut(),
- layout,
- bindings::GFP_KERNEL | bindings::__GFP_ZERO,
- )
- }
-}
+static __rust_no_alloc_shim_is_unstable: u8 = 0;
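
Why overriding `realloc` matters for performance: the `GlobalAlloc` default
always allocates fresh memory, copies, and frees the old block, whereas
forwarding to `krealloc_aligned()` lets the C allocator resize in place when
it can. For comparison, a paraphrase of the documented default behavior
(sketch, not the kernel implementation):

    use core::alloc::{GlobalAlloc, Layout};
    use core::{cmp, ptr};

    // Paraphrase of the default `GlobalAlloc::realloc`: always move + copy.
    unsafe fn default_realloc<A: GlobalAlloc>(
        a: &A, old: *mut u8, layout: Layout, new_size: usize,
    ) -> *mut u8 {
        let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
        let new = a.alloc(new_layout);
        if !new.is_null() {
            ptr::copy_nonoverlapping(old, new, cmp::min(layout.size(), new_size));
            a.dealloc(old, layout);
        }
        new
    }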
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index e05563aad2ed59..4ebb6f23fc2ec0 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -212,11 +212,12 @@
use crate::{
error::{self, Error},
sync::UniqueArc,
+ types::{Opaque, ScopeGuard},
};
use alloc::boxed::Box;
use core::{
alloc::AllocError,
- cell::Cell,
+ cell::UnsafeCell,
convert::Infallible,
marker::PhantomData,
mem::MaybeUninit,
@@ -518,13 +519,17 @@ macro_rules! stack_try_pin_init {
/// - Fields that you want to initialize in-place have to use `<-` instead of `:`.
/// - In front of the initializer you can write `&this in` to have access to a [`NonNull<Self>`]
/// pointer named `this` inside of the initializer.
+/// - Using struct update syntax one can place `..Zeroable::zeroed()` at the very end of the
+/// struct, this initializes every field with 0 and then runs all initializers specified in the
+/// body. This can only be done if [`Zeroable`] is implemented for the struct.
///
/// For instance:
///
/// ```rust
-/// # use kernel::{macros::pin_data, pin_init};
+/// # use kernel::{macros::{Zeroable, pin_data}, pin_init};
/// # use core::{ptr::addr_of_mut, marker::PhantomPinned};
/// #[pin_data]
+/// #[derive(Zeroable)]
/// struct Buf {
/// // `ptr` points into `buf`.
/// ptr: *mut u8,
@@ -537,6 +542,10 @@ macro_rules! stack_try_pin_init {
/// ptr: unsafe { addr_of_mut!((*this.as_ptr()).buf).cast() },
/// pin: PhantomPinned,
/// });
+/// pin_init!(Buf {
+/// buf: [1; 64],
+/// ..Zeroable::zeroed()
+/// });
/// ```
///
/// [`try_pin_init!`]: kernel::try_pin_init
@@ -548,11 +557,15 @@ macro_rules! pin_init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- $crate::try_pin_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)?),
@fields($($fields)*),
@error(::core::convert::Infallible),
+ @data(PinData, use_data),
+ @has_data(HasPinData, __pin_data),
+ @construct_closure(pin_init_from_closure),
+ @munch_fields($($fields)*),
)
};
}
@@ -601,205 +614,31 @@ macro_rules! try_pin_init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- $crate::try_pin_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)? ),
@fields($($fields)*),
@error($crate::error::Error),
+ @data(PinData, use_data),
+ @has_data(HasPinData, __pin_data),
+ @construct_closure(pin_init_from_closure),
+ @munch_fields($($fields)*),
)
};
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}? $err:ty) => {
- $crate::try_pin_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)? ),
@fields($($fields)*),
@error($err),
+ @data(PinData, use_data),
+ @has_data(HasPinData, __pin_data),
+ @construct_closure(pin_init_from_closure),
+ @munch_fields($($fields)*),
)
};
- (
- @this($($this:ident)?),
- @typ($t:ident $(::<$($generics:ty),*>)?),
- @fields($($fields:tt)*),
- @error($err:ty),
- ) => {{
- // We do not want to allow arbitrary returns, so we declare this type as the `Ok` return
- // type and shadow it later when we insert the arbitrary user code. That way there will be
- // no possibility of returning without `unsafe`.
- struct __InitOk;
- // Get the pin data from the supplied type.
- let data = unsafe {
- use $crate::init::__internal::HasPinData;
- $t$(::<$($generics),*>)?::__pin_data()
- };
- // Ensure that `data` really is of type `PinData` and help with type inference:
- let init = $crate::init::__internal::PinData::make_closure::<_, __InitOk, $err>(
- data,
- move |slot| {
- {
- // Shadow the structure so it cannot be used to return early.
- struct __InitOk;
- // Create the `this` so it can be referenced by the user inside of the
- // expressions creating the individual fields.
- $(let $this = unsafe { ::core::ptr::NonNull::new_unchecked(slot) };)?
- // Initialize every field.
- $crate::try_pin_init!(init_slot:
- @data(data),
- @slot(slot),
- @munch_fields($($fields)*,),
- );
- // We use unreachable code to ensure that all fields have been mentioned exactly
- // once, this struct initializer will still be type-checked and complain with a
- // very natural error message if a field is forgotten/mentioned more than once.
- #[allow(unreachable_code, clippy::diverging_sub_expression)]
- if false {
- $crate::try_pin_init!(make_initializer:
- @slot(slot),
- @type_name($t),
- @munch_fields($($fields)*,),
- @acc(),
- );
- }
- // Forget all guards, since initialization was a success.
- $crate::try_pin_init!(forget_guards:
- @munch_fields($($fields)*,),
- );
- }
- Ok(__InitOk)
- }
- );
- let init = move |slot| -> ::core::result::Result<(), $err> {
- init(slot).map(|__InitOk| ())
- };
- let init = unsafe { $crate::init::pin_init_from_closure::<_, $err>(init) };
- init
- }};
- (init_slot:
- @data($data:ident),
- @slot($slot:ident),
- @munch_fields($(,)?),
- ) => {
- // Endpoint of munching, no fields are left.
- };
- (init_slot:
- @data($data:ident),
- @slot($slot:ident),
- // In-place initialization syntax.
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- ) => {
- let $field = $val;
- // Call the initializer.
- //
- // SAFETY: `slot` is valid, because we are inside of an initializer closure, we
- // return when an error/panic occurs.
- // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
- unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), $field)? };
- // Create the drop guard.
- //
- // We only give access to `&DropGuard`, so it cannot be forgotten via safe code.
- //
- // SAFETY: We forget the guard later when initialization has succeeded.
- let $field = &unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
- };
-
- $crate::try_pin_init!(init_slot:
- @data($data),
- @slot($slot),
- @munch_fields($($rest)*),
- );
- };
- (init_slot:
- @data($data:ident),
- @slot($slot:ident),
- // Direct value init, this is safe for every field.
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- ) => {
- $(let $field = $val;)?
- // Initialize the field.
- //
- // SAFETY: The memory at `slot` is uninitialized.
- unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
- // Create the drop guard:
- //
- // We only give access to `&DropGuard`, so it cannot be accidentally forgotten.
- //
- // SAFETY: We forget the guard later when initialization has succeeded.
- let $field = &unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
- };
-
- $crate::try_pin_init!(init_slot:
- @data($data),
- @slot($slot),
- @munch_fields($($rest)*),
- );
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($(,)?),
- @acc($($acc:tt)*),
- ) => {
- // Endpoint, nothing more to munch, create the initializer.
- // Since we are in the `if false` branch, this will never get executed. We abuse `slot` to
- // get the correct type inference here:
- unsafe {
- ::core::ptr::write($slot, $t {
- $($acc)*
- });
- }
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- @acc($($acc:tt)*),
- ) => {
- $crate::try_pin_init!(make_initializer:
- @slot($slot),
- @type_name($t),
- @munch_fields($($rest)*),
- @acc($($acc)* $field: ::core::panic!(),),
- );
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- @acc($($acc:tt)*),
- ) => {
- $crate::try_pin_init!(make_initializer:
- @slot($slot),
- @type_name($t),
- @munch_fields($($rest)*),
- @acc($($acc)* $field: ::core::panic!(),),
- );
- };
- (forget_guards:
- @munch_fields($(,)?),
- ) => {
- // Munching finished.
- };
- (forget_guards:
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- ) => {
- unsafe { $crate::init::__internal::DropGuard::forget($field) };
-
- $crate::try_pin_init!(forget_guards:
- @munch_fields($($rest)*),
- );
- };
- (forget_guards:
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- ) => {
- unsafe { $crate::init::__internal::DropGuard::forget($field) };
-
- $crate::try_pin_init!(forget_guards:
- @munch_fields($($rest)*),
- );
- };
}
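
The arms deleted above were `try_pin_init!`'s private TT-munching implementation; the same munching now lives once in `__init_internal!` (added to rust/kernel/init/macros.rs later in this patch). For readers new to the pattern, a minimal standalone muncher in the same style (illustrative only, not kernel code):

    macro_rules! count_fields {
        // Endpoint of munching, no fields are left.
        (@munch()) => { 0usize };
        // Consume one field, then recurse on the rest.
        (@munch($field:ident, $($rest:tt)*)) => {
            1usize + count_fields!(@munch($($rest)*))
        };
        // Public entry point: append a trailing comma and start munching.
        ($($fields:ident),* $(,)?) => {
            count_fields!(@munch($($fields,)*))
        };
    }

    assert_eq!(count_fields!(a, b, c), 3);
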
/// Construct an in-place initializer for `struct`s.
@@ -824,11 +663,15 @@ macro_rules! init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- $crate::try_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)?),
@fields($($fields)*),
@error(::core::convert::Infallible),
+ @data(InitData, /*no use_data*/),
+ @has_data(HasInitData, __init_data),
+ @construct_closure(init_from_closure),
+ @munch_fields($($fields)*),
)
}
}
@@ -871,199 +714,31 @@ macro_rules! try_init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- $crate::try_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)?),
@fields($($fields)*),
@error($crate::error::Error),
+ @data(InitData, /*no use_data*/),
+ @has_data(HasInitData, __init_data),
+ @construct_closure(init_from_closure),
+ @munch_fields($($fields)*),
)
};
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}? $err:ty) => {
- $crate::try_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)?),
@fields($($fields)*),
@error($err),
+ @data(InitData, /*no use_data*/),
+ @has_data(HasInitData, __init_data),
+ @construct_closure(init_from_closure),
+ @munch_fields($($fields)*),
)
};
- (
- @this($($this:ident)?),
- @typ($t:ident $(::<$($generics:ty),*>)?),
- @fields($($fields:tt)*),
- @error($err:ty),
- ) => {{
- // We do not want to allow arbitrary returns, so we declare this type as the `Ok` return
- // type and shadow it later when we insert the arbitrary user code. That way there will be
- // no possibility of returning without `unsafe`.
- struct __InitOk;
- // Get the init data from the supplied type.
- let data = unsafe {
- use $crate::init::__internal::HasInitData;
- $t$(::<$($generics),*>)?::__init_data()
- };
- // Ensure that `data` really is of type `InitData` and help with type inference:
- let init = $crate::init::__internal::InitData::make_closure::<_, __InitOk, $err>(
- data,
- move |slot| {
- {
- // Shadow the structure so it cannot be used to return early.
- struct __InitOk;
- // Create the `this` so it can be referenced by the user inside of the
- // expressions creating the individual fields.
- $(let $this = unsafe { ::core::ptr::NonNull::new_unchecked(slot) };)?
- // Initialize every field.
- $crate::try_init!(init_slot:
- @slot(slot),
- @munch_fields($($fields)*,),
- );
- // We use unreachable code to ensure that all fields have been mentioned exactly
- // once, this struct initializer will still be type-checked and complain with a
- // very natural error message if a field is forgotten/mentioned more than once.
- #[allow(unreachable_code, clippy::diverging_sub_expression)]
- if false {
- $crate::try_init!(make_initializer:
- @slot(slot),
- @type_name($t),
- @munch_fields($($fields)*,),
- @acc(),
- );
- }
- // Forget all guards, since initialization was a success.
- $crate::try_init!(forget_guards:
- @munch_fields($($fields)*,),
- );
- }
- Ok(__InitOk)
- }
- );
- let init = move |slot| -> ::core::result::Result<(), $err> {
- init(slot).map(|__InitOk| ())
- };
- let init = unsafe { $crate::init::init_from_closure::<_, $err>(init) };
- init
- }};
- (init_slot:
- @slot($slot:ident),
- @munch_fields( $(,)?),
- ) => {
- // Endpoint of munching, no fields are left.
- };
- (init_slot:
- @slot($slot:ident),
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- ) => {
- let $field = $val;
- // Call the initializer.
- //
- // SAFETY: `slot` is valid, because we are inside of an initializer closure, we
- // return when an error/panic occurs.
- unsafe {
- $crate::init::Init::__init($field, ::core::ptr::addr_of_mut!((*$slot).$field))?;
- }
- // Create the drop guard.
- //
- // We only give access to `&DropGuard`, so it cannot be accidentally forgotten.
- //
- // SAFETY: We forget the guard later when initialization has succeeded.
- let $field = &unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
- };
-
- $crate::try_init!(init_slot:
- @slot($slot),
- @munch_fields($($rest)*),
- );
- };
- (init_slot:
- @slot($slot:ident),
- // Direct value init.
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- ) => {
- $(let $field = $val;)?
- // Call the initializer.
- //
- // SAFETY: The memory at `slot` is uninitialized.
- unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
- // Create the drop guard.
- //
- // We only give access to `&DropGuard`, so it cannot be accidentally forgotten.
- //
- // SAFETY: We forget the guard later when initialization has succeeded.
- let $field = &unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
- };
-
- $crate::try_init!(init_slot:
- @slot($slot),
- @munch_fields($($rest)*),
- );
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields( $(,)?),
- @acc($($acc:tt)*),
- ) => {
- // Endpoint, nothing more to munch, create the initializer.
- // Since we are in the `if false` branch, this will never get executed. We abuse `slot` to
- // get the correct type inference here:
- unsafe {
- ::core::ptr::write($slot, $t {
- $($acc)*
- });
- }
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- @acc($($acc:tt)*),
- ) => {
- $crate::try_init!(make_initializer:
- @slot($slot),
- @type_name($t),
- @munch_fields($($rest)*),
- @acc($($acc)*$field: ::core::panic!(),),
- );
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- @acc($($acc:tt)*),
- ) => {
- $crate::try_init!(make_initializer:
- @slot($slot),
- @type_name($t),
- @munch_fields($($rest)*),
- @acc($($acc)*$field: ::core::panic!(),),
- );
- };
- (forget_guards:
- @munch_fields($(,)?),
- ) => {
- // Munching finished.
- };
- (forget_guards:
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- ) => {
- unsafe { $crate::init::__internal::DropGuard::forget($field) };
-
- $crate::try_init!(forget_guards:
- @munch_fields($($rest)*),
- );
- };
- (forget_guards:
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- ) => {
- unsafe { $crate::init::__internal::DropGuard::forget($field) };
-
- $crate::try_init!(forget_guards:
- @munch_fields($($rest)*),
- );
- };
}
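
As with `try_pin_init!`, the two public arms kept above differ only in the error type: the first defaults to `$crate::error::Error`, the second takes an explicit type after `}?`. Hypothetical calls (`Foo` and `MyError` are made up for illustration):

    // Default error type (`$crate::error::Error`):
    let a = try_init!(Foo { x: 0 });
    // Explicit error type via the `}? $err:ty` suffix:
    let b = try_init!(Foo { x: 0 }? MyError);
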
/// A pin-initializer for the type `T`.
@@ -1100,6 +775,79 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
/// deallocate.
/// - `slot` will not move until it is dropped, i.e. it will be pinned.
unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E>;
+
+ /// First initializes the value using `self`, then calls the function `f` with the initialized
+ /// value.
+ ///
+ /// If `f` returns an error, the value is dropped and the initializer will forward the error.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #![allow(clippy::disallowed_names)]
+ /// use kernel::{types::Opaque, init::pin_init_from_closure};
+ /// #[repr(C)]
+ /// struct RawFoo([u8; 16]);
+ /// extern {
+ /// fn init_foo(_: *mut RawFoo);
+ /// }
+ ///
+ /// #[pin_data]
+ /// struct Foo {
+ /// #[pin]
+ /// raw: Opaque<RawFoo>,
+ /// }
+ ///
+ /// impl Foo {
+ /// fn setup(self: Pin<&mut Self>) {
+ /// pr_info!("Setting up foo");
+ /// }
+ /// }
+ ///
+ /// let foo = pin_init!(Foo {
+ /// raw <- unsafe {
+ /// Opaque::ffi_init(|s| {
+ /// init_foo(s);
+ /// })
+ /// },
+ /// }).pin_chain(|foo| {
+ /// foo.setup();
+ /// Ok(())
+ /// });
+ /// ```
+ fn pin_chain<F>(self, f: F) -> ChainPinInit<Self, F, T, E>
+ where
+ F: FnOnce(Pin<&mut T>) -> Result<(), E>,
+ {
+ ChainPinInit(self, f, PhantomData)
+ }
+}
+
+/// An initializer returned by [`PinInit::pin_chain`].
+pub struct ChainPinInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, Box<T>)>);
+
+// SAFETY: The `__pinned_init` function is implemented such that it
+// - returns `Ok(())` on successful initialization,
+// - returns `Err(err)` on error, in which case `slot` will be dropped.
+// - considers `slot` pinned.
+unsafe impl<T: ?Sized, E, I, F> PinInit<T, E> for ChainPinInit<I, F, T, E>
+where
+ I: PinInit<T, E>,
+ F: FnOnce(Pin<&mut T>) -> Result<(), E>,
+{
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ // SAFETY: All requirements fulfilled since this function is `__pinned_init`.
+ unsafe { self.0.__pinned_init(slot)? };
+ // SAFETY: The above call initialized `slot` and we still have unique access.
+ let val = unsafe { &mut *slot };
+ // SAFETY: `slot` is considered pinned.
+ let val = unsafe { Pin::new_unchecked(val) };
+ (self.1)(val).map_err(|e| {
+ // SAFETY: `slot` was initialized above.
+ unsafe { core::ptr::drop_in_place(slot) };
+ e
+ })
+ }
}
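
The error path is the subtle part: if the chained closure fails, the already-initialized value is destroyed before the error propagates, so callers only ever need to deallocate. A standalone sketch of that mechanism in plain Rust (not the kernel API itself):

    use core::ptr;

    /// Runs `init` on `slot`, then `f` on the initialized value; drops the
    /// value again if `f` fails.
    unsafe fn chain<T, E>(
        init: impl FnOnce(*mut T) -> Result<(), E>,
        f: impl FnOnce(&mut T) -> Result<(), E>,
        slot: *mut T,
    ) -> Result<(), E> {
        init(slot)?;
        // SAFETY: `init` succeeded, so `slot` now holds a valid `T`.
        f(unsafe { &mut *slot }).map_err(|e| {
            // SAFETY: `slot` was initialized above and is not read again.
            unsafe { ptr::drop_in_place(slot) };
            e
        })
    }
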
/// An initializer for `T`.
@@ -1132,7 +880,7 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
///
/// [`Arc<T>`]: crate::sync::Arc
#[must_use = "An initializer must be used in order to create its value."]
-pub unsafe trait Init<T: ?Sized, E = Infallible>: Sized {
+pub unsafe trait Init<T: ?Sized, E = Infallible>: PinInit<T, E> {
/// Initializes `slot`.
///
/// # Safety
@@ -1141,16 +889,73 @@ pub unsafe trait Init<T: ?Sized, E = Infallible>: Sized {
/// - the caller does not touch `slot` when `Err` is returned, they are only permitted to
/// deallocate.
unsafe fn __init(self, slot: *mut T) -> Result<(), E>;
+
+ /// First initializes the value using `self`, then calls the function `f` with the initialized
+ /// value.
+ ///
+ /// If `f` returns an error, the value is dropped and the initializer will forward the error.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #![allow(clippy::disallowed_names)]
+ /// use kernel::{types::Opaque, init::{self, init_from_closure}};
+ /// struct Foo {
+ /// buf: [u8; 1_000_000],
+ /// }
+ ///
+ /// impl Foo {
+ /// fn setup(&mut self) {
+ /// pr_info!("Setting up foo");
+ /// }
+ /// }
+ ///
+ /// let foo = init!(Foo {
+ /// buf <- init::zeroed()
+ /// }).chain(|foo| {
+ /// foo.setup();
+ /// Ok(())
+ /// });
+ /// ```
+ fn chain<F>(self, f: F) -> ChainInit<Self, F, T, E>
+ where
+ F: FnOnce(&mut T) -> Result<(), E>,
+ {
+ ChainInit(self, f, PhantomData)
+ }
}
-// SAFETY: Every in-place initializer can also be used as a pin-initializer.
-unsafe impl<T: ?Sized, E, I> PinInit<T, E> for I
+/// An initializer returned by [`Init::chain`].
+pub struct ChainInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, Box<T>)>);
+
+// SAFETY: The `__init` function is implemented such that it
+// - returns `Ok(())` on successful initialization,
+// - returns `Err(err)` on error, in which case `slot` will be dropped.
+unsafe impl<T: ?Sized, E, I, F> Init<T, E> for ChainInit<I, F, T, E>
where
I: Init<T, E>,
+ F: FnOnce(&mut T) -> Result<(), E>,
+{
+ unsafe fn __init(self, slot: *mut T) -> Result<(), E> {
+ // SAFETY: All requirements fulfilled since this function is `__init`.
+ unsafe { self.0.__pinned_init(slot)? };
+ // SAFETY: The above call initialized `slot` and we still have unique access.
+ (self.1)(unsafe { &mut *slot }).map_err(|e| {
+ // SAFETY: `slot` was initialized above.
+ unsafe { core::ptr::drop_in_place(slot) };
+ e
+ })
+ }
+}
+
+// SAFETY: `__pinned_init` behaves exactly the same as `__init`.
+unsafe impl<T: ?Sized, E, I, F> PinInit<T, E> for ChainInit<I, F, T, E>
+where
+ I: Init<T, E>,
+ F: FnOnce(&mut T) -> Result<(), E>,
{
unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
- // SAFETY: `__init` meets the same requirements as `__pinned_init`, except that it does not
- // require `slot` to not move after init.
+ // SAFETY: `__init` has less strict requirements compared to `__pinned_init`.
unsafe { self.__init(slot) }
}
}
@@ -1202,6 +1007,93 @@ pub fn uninit<T, E>() -> impl Init<MaybeUninit<T>, E> {
unsafe { init_from_closure(|_| Ok(())) }
}
+/// Initializes an array by initializing each element via the provided initializer.
+///
+/// # Examples
+///
+/// ```rust
+/// use kernel::{error::Error, init::init_array_from_fn};
+/// let array: Box<[usize; 1_000]> = Box::init::<Error>(init_array_from_fn(|i| i)).unwrap();
+/// assert_eq!(array.len(), 1_000);
+/// ```
+pub fn init_array_from_fn<I, const N: usize, T, E>(
+ mut make_init: impl FnMut(usize) -> I,
+) -> impl Init<[T; N], E>
+where
+ I: Init<T, E>,
+{
+ let init = move |slot: *mut [T; N]| {
+ let slot = slot.cast::<T>();
+ // Counts the number of initialized elements and when dropped drops that many elements from
+ // `slot`.
+ let mut init_count = ScopeGuard::new_with_data(0, |i| {
+ // We now free every element that has been initialized before:
+ // SAFETY: The loop initialized exactly the values from 0..i and since we
+ // return `Err` below, the caller will consider the memory at `slot` as
+ // uninitialized.
+ unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(slot, i)) };
+ });
+ for i in 0..N {
+ let init = make_init(i);
+ // SAFETY: Since 0 <= `i` < N, it is still in bounds of `[T; N]`.
+ let ptr = unsafe { slot.add(i) };
+ // SAFETY: The pointer is derived from `slot` and thus satisfies the `__init`
+ // requirements.
+ unsafe { init.__init(ptr) }?;
+ *init_count += 1;
+ }
+ init_count.dismiss();
+ Ok(())
+ };
+ // SAFETY: The initializer above initializes every element of the array. On failure it drops
+ // any initialized elements and returns `Err`.
+ unsafe { init_from_closure(init) }
+}
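
The `ScopeGuard` bookkeeping guarantees that a failure at element `i` drops exactly the `i` elements initialized so far and nothing else. The same invariant, as a self-contained sketch without the guard type (illustrative only):

    use core::mem::MaybeUninit;

    fn init_array<T, E, const N: usize>(
        mut make: impl FnMut(usize) -> Result<T, E>,
    ) -> Result<[T; N], E> {
        let mut slot: MaybeUninit<[T; N]> = MaybeUninit::uninit();
        let base = slot.as_mut_ptr().cast::<T>();
        for i in 0..N {
            match make(i) {
                // SAFETY: `i < N`, so the write stays in bounds.
                Ok(v) => unsafe { base.add(i).write(v) },
                Err(e) => {
                    // SAFETY: exactly the first `i` elements are initialized.
                    unsafe {
                        core::ptr::drop_in_place(
                            core::ptr::slice_from_raw_parts_mut(base, i),
                        )
                    };
                    return Err(e);
                }
            }
        }
        // SAFETY: the loop initialized all `N` elements.
        Ok(unsafe { slot.assume_init() })
    }
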
+
+/// Initializes an array by initializing each element via the provided initializer.
+///
+/// # Examples
+///
+/// ```rust
+/// use kernel::{sync::{Arc, Mutex}, init::pin_init_array_from_fn, new_mutex};
+/// let array: Arc<[Mutex<usize>; 1_000]> =
+/// Arc::pin_init(pin_init_array_from_fn(|i| new_mutex!(i))).unwrap();
+/// assert_eq!(array.len(), 1_000);
+/// ```
+pub fn pin_init_array_from_fn<I, const N: usize, T, E>(
+ mut make_init: impl FnMut(usize) -> I,
+) -> impl PinInit<[T; N], E>
+where
+ I: PinInit<T, E>,
+{
+ let init = move |slot: *mut [T; N]| {
+ let slot = slot.cast::<T>();
+ // Counts the number of initialized elements and when dropped drops that many elements from
+ // `slot`.
+ let mut init_count = ScopeGuard::new_with_data(0, |i| {
+ // We now free every element that has been initialized before:
+ // SAFETY: The loop initialized exactly the values from 0..i and since we
+ // return `Err` below, the caller will consider the memory at `slot` as
+ // uninitialized.
+ unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(slot, i)) };
+ });
+ for i in 0..N {
+ let init = make_init(i);
+ // SAFETY: Since 0 <= `i` < N, it is still in bounds of `[T; N]`.
+ let ptr = unsafe { slot.add(i) };
+ // SAFETY: The pointer is derived from `slot` and thus satisfies the `__init`
+ // requirements.
+ unsafe { init.__pinned_init(ptr) }?;
+ *init_count += 1;
+ }
+ init_count.dismiss();
+ Ok(())
+ };
+ // SAFETY: The initializer above initializes every element of the array. On failure it drops
+ // any initialized elements and returns `Err`.
+ unsafe { pin_init_from_closure(init) }
+}
+
// SAFETY: Every type can be initialized by-value.
unsafe impl<T, E> Init<T, E> for T {
unsafe fn __init(self, slot: *mut T) -> Result<(), E> {
@@ -1210,6 +1102,13 @@ unsafe impl<T, E> Init<T, E> for T {
}
}
+// SAFETY: Every type can be initialized by-value. `__pinned_init` calls `__init`.
+unsafe impl<T, E> PinInit<T, E> for T {
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ unsafe { self.__init(slot) }
+ }
+}
+
/// Smart pointer that can initialize memory in-place.
pub trait InPlaceInit<T>: Sized {
/// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this
@@ -1398,6 +1297,11 @@ impl_zeroable! {
// SAFETY: Type is allowed to take any value, including all zeros.
{<T>} MaybeUninit<T>,
+ // SAFETY: Type is allowed to take any value, including all zeros.
+ {<T>} Opaque<T>,
+
+ // SAFETY: `T: Zeroable` and `UnsafeCell` is `repr(transparent)`.
+ {<T: ?Sized + Zeroable>} UnsafeCell<T>,
// SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee).
Option<NonZeroU8>, Option<NonZeroU16>, Option<NonZeroU32>, Option<NonZeroU64>,
diff --git a/rust/kernel/init/__internal.rs b/rust/kernel/init/__internal.rs
index 44751fb62b51a2..db3372619ecdd4 100644
--- a/rust/kernel/init/__internal.rs
+++ b/rust/kernel/init/__internal.rs
@@ -13,7 +13,7 @@ use super::*;
///
/// [nomicon]: https://doc.rust-lang.org/nomicon/subtyping.html
/// [this table]: https://doc.rust-lang.org/nomicon/phantom-data.html#table-of-phantomdata-patterns
-type Invariant<T> = PhantomData<fn(*mut T) -> *mut T>;
+pub(super) type Invariant<T> = PhantomData<fn(*mut T) -> *mut T>;
/// This is the module-internal type implementing `PinInit` and `Init`. It is unsafe to create this
/// type, since the closure needs to fulfill the same safety requirement as the
@@ -32,6 +32,18 @@ where
}
}
+// SAFETY: While constructing the `InitClosure`, the user promised that it upholds the
+// `__pinned_init` invariants.
+unsafe impl<T: ?Sized, F, E> PinInit<T, E> for InitClosure<F, T, E>
+where
+ F: FnOnce(*mut T) -> Result<(), E>,
+{
+ #[inline]
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ (self.0)(slot)
+ }
+}
+
/// This trait is only implemented via the `#[pin_data]` proc-macro. It is used to facilitate
/// the pin projections within the initializers.
///
@@ -174,7 +186,6 @@ impl<T> StackInit<T> {
/// Can be forgotten to prevent the drop.
pub struct DropGuard<T: ?Sized> {
ptr: *mut T,
- do_drop: Cell<bool>,
}
impl<T: ?Sized> DropGuard<T> {
@@ -190,32 +201,16 @@ impl<T: ?Sized> DropGuard<T> {
/// - will not be dropped by any other means.
#[inline]
pub unsafe fn new(ptr: *mut T) -> Self {
- Self {
- ptr,
- do_drop: Cell::new(true),
- }
- }
-
- /// Prevents this guard from dropping the supplied pointer.
- ///
- /// # Safety
- ///
- /// This function is unsafe in order to prevent safe code from forgetting this guard. It should
- /// only be called by the macros in this module.
- #[inline]
- pub unsafe fn forget(&self) {
- self.do_drop.set(false);
+ Self { ptr }
}
}
impl<T: ?Sized> Drop for DropGuard<T> {
#[inline]
fn drop(&mut self) {
- if self.do_drop.get() {
- // SAFETY: A `DropGuard` can only be constructed using the unsafe `new` function
- // ensuring that this operation is safe.
- unsafe { ptr::drop_in_place(self.ptr) }
- }
+ // SAFETY: A `DropGuard` can only be constructed using the unsafe `new` function
+ // ensuring that this operation is safe.
+ unsafe { ptr::drop_in_place(self.ptr) }
}
}
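
Removing the `do_drop` flag is sound because the new `__init_internal!` never exposes a guard to user-written field expressions: each guard gets fresh `paste!` hygiene and is consumed with `::core::mem::forget(...)` on success. A standalone sketch of the simplified guard (assumed names, not kernel code):

    use core::ptr;

    struct Guard<T: ?Sized>(*mut T);

    impl<T: ?Sized> Drop for Guard<T> {
        fn drop(&mut self) {
            // An armed guard unconditionally drops its pointee; there is no
            // flag left to check.
            // SAFETY (sketch): the pointee is initialized and owned by the
            // guard until the guard is forgotten.
            unsafe { ptr::drop_in_place(self.0) }
        }
    }

    // Success path in the macros: `::core::mem::forget(guard)` replaces the
    // old `DropGuard::forget(&guard)` flag flip.
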
diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs
index 00aa4e956c0ae9..cb6e61b6c50bda 100644
--- a/rust/kernel/init/macros.rs
+++ b/rust/kernel/init/macros.rs
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
//! This module provides the macros that actually implement the proc-macros `pin_data` and
-//! `pinned_drop`.
+//! `pinned_drop`. It also contains `__init_internal`, the implementation of the `{try_}{pin_}init!`
+//! macros.
//!
//! These macros should never be called directly, since they expect their input to be
-//! in a certain format which is internal. Use the proc-macros instead.
+//! in a certain format, which is internal. If used incorrectly, these macros can lead to UB even in
+//! safe code! Use the public-facing macros instead.
//!
//! This architecture has been chosen because the kernel does not yet have access to `syn` which
//! would make matters a lot easier for implementing these as proc-macros.
@@ -43,7 +45,7 @@
//! #[pinned_drop]
//! impl PinnedDrop for Foo {
//! fn drop(self: Pin<&mut Self>) {
-//! println!("{self:p} is getting dropped.");
+//! pr_info!("{self:p} is getting dropped.");
//! }
//! }
//!
@@ -168,8 +170,10 @@
//! t: T,
//! }
//! #[doc(hidden)]
-//! impl<'__pin, T>
-//! ::core::marker::Unpin for Bar<T> where __Unpin<'__pin, T>: ::core::marker::Unpin {}
+//! impl<'__pin, T> ::core::marker::Unpin for Bar<T>
+//! where
+//! __Unpin<'__pin, T>: ::core::marker::Unpin,
+//! {}
//! // Now we need to ensure that `Bar` does not implement `Drop`, since that would give users
//! // access to `&mut self` inside of `drop` even if the struct was pinned. This could lead to
//! // UB with only safe code, so we disallow this by giving a trait implementation error using
@@ -186,8 +190,9 @@
//! // for safety, but a good sanity check, since no normal code calls `PinnedDrop::drop`.
//! #[allow(non_camel_case_types)]
//! trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {}
-//! impl<T: ::kernel::init::PinnedDrop>
-//! UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for T {}
+//! impl<
+//! T: ::kernel::init::PinnedDrop,
+//! > UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for T {}
//! impl<T> UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for Bar<T> {}
//! };
//! ```
@@ -217,7 +222,7 @@
//! // return type and shadow it later when we insert the arbitrary user code. That way
//! // there will be no possibility of returning without `unsafe`.
//! struct __InitOk;
-//! // Get the pin-data type from the initialized type.
+//! // Get the data about fields from the supplied type.
//! // - the function is unsafe, hence the unsafe block
//! // - we `use` the `HasPinData` trait in the block, it is only available in that
//! // scope.
@@ -225,8 +230,7 @@
//! use ::kernel::init::__internal::HasPinData;
//! Self::__pin_data()
//! };
-//! // Use `data` to help with type inference, the closure supplied will have the type
-//! // `FnOnce(*mut Self) -> Result<__InitOk, Infallible>`.
+//! // Ensure that `data` really is of type `PinData` and help with type inference:
//! let init = ::kernel::init::__internal::PinData::make_closure::<
//! _,
//! __InitOk,
@@ -234,71 +238,75 @@
//! >(data, move |slot| {
//! {
//! // Shadow the structure so it cannot be used to return early. If a user
-//! // tries to write `return Ok(__InitOk)`, then they get a type error, since
-//! // that will refer to this struct instead of the one defined above.
+//! // tries to write `return Ok(__InitOk)`, then they get a type error,
+//! // since that will refer to this struct instead of the one defined
+//! // above.
//! struct __InitOk;
//! // This is the expansion of `t,`, which is syntactic sugar for `t: t,`.
-//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).t), t) };
-//! // Since initialization could fail later (not in this case, since the error
-//! // type is `Infallible`) we will need to drop this field if there is an
-//! // error later. This `DropGuard` will drop the field when it gets dropped
-//! // and has not yet been forgotten. We make a reference to it, so users
-//! // cannot `mem::forget` it from the initializer, since the name is the same
-//! // as the field (including hygiene).
-//! let t = &unsafe {
-//! ::kernel::init::__internal::DropGuard::new(
-//! ::core::addr_of_mut!((*slot).t),
-//! )
+//! {
+//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).t), t) };
+//! }
+//! // Since initialization could fail later (not in this case, since the
+//! // error type is `Infallible`) we will need to drop this field if there
+//! // is an error later. This `DropGuard` will drop the field when it gets
+//! // dropped and has not yet been forgotten.
+//! let t = unsafe {
+//! ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).t))
//! };
//! // Expansion of `x: 0,`:
-//! // Since this can be an arbitrary expression we cannot place it inside of
-//! // the `unsafe` block, so we bind it here.
-//! let x = 0;
-//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).x), x) };
+//! // Since this can be an arbitrary expression we cannot place it inside
+//! // of the `unsafe` block, so we bind it here.
+//! {
+//! let x = 0;
+//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).x), x) };
+//! }
//! // We again create a `DropGuard`.
-//! let x = &unsafe {
-//! ::kernel::init::__internal::DropGuard::new(
-//! ::core::addr_of_mut!((*slot).x),
-//! )
+//! let x = unsafe {
+//! ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).x))
//! };
-//!
+//! // Since initialization has successfully completed, we can now forget
+//! // the guards. This is not `mem::forget`, since we only have
+//! // `&DropGuard`.
+//! ::core::mem::forget(x);
+//! ::core::mem::forget(t);
//! // Here we use the type checker to ensure that every field has been
//! // initialized exactly once, since this is `if false` it will never get
//! // executed, but still type-checked.
-//! // Additionally we abuse `slot` to automatically infer the correct type for
-//! // the struct. This is also another check that every field is accessible
-//! // from this scope.
+//! // Additionally we abuse `slot` to automatically infer the correct type
+//! // for the struct. This is also another check that every field is
+//! // accessible from this scope.
//! #[allow(unreachable_code, clippy::diverging_sub_expression)]
-//! if false {
+//! let _ = || {
//! unsafe {
//! ::core::ptr::write(
//! slot,
//! Self {
-//! // We only care about typecheck finding every field here,
-//! // the expression does not matter, just conjure one using
-//! // `panic!()`:
+//! // We only care about typecheck finding every field
+//! // here, the expression does not matter, just conjure
+//! // one using `panic!()`:
//! t: ::core::panic!(),
//! x: ::core::panic!(),
//! },
//! );
//! };
-//! }
-//! // Since initialization has successfully completed, we can now forget the
-//! // guards. This is not `mem::forget`, since we only have `&DropGuard`.
-//! unsafe { ::kernel::init::__internal::DropGuard::forget(t) };
-//! unsafe { ::kernel::init::__internal::DropGuard::forget(x) };
+//! };
//! }
//! // We leave the scope above and gain access to the previously shadowed
//! // `__InitOk` that we need to return.
//! Ok(__InitOk)
//! });
//! // Change the return type from `__InitOk` to `()`.
-//! let init = move |slot| -> ::core::result::Result<(), ::core::convert::Infallible> {
+//! let init = move |
+//! slot,
+//! | -> ::core::result::Result<(), ::core::convert::Infallible> {
//! init(slot).map(|__InitOk| ())
//! };
//! // Construct the initializer.
//! let init = unsafe {
-//! ::kernel::init::pin_init_from_closure::<_, ::core::convert::Infallible>(init)
+//! ::kernel::init::pin_init_from_closure::<
+//! _,
+//! ::core::convert::Infallible,
+//! >(init)
//! };
//! init
//! }
@@ -372,7 +380,10 @@
//! b: Bar<u32>,
//! }
//! #[doc(hidden)]
-//! impl<'__pin> ::core::marker::Unpin for Foo where __Unpin<'__pin>: ::core::marker::Unpin {}
+//! impl<'__pin> ::core::marker::Unpin for Foo
+//! where
+//! __Unpin<'__pin>: ::core::marker::Unpin,
+//! {}
//! // Since we specified `PinnedDrop` as the argument to `#[pin_data]`, we expect `Foo` to
//! // implement `PinnedDrop`. Thus we do not need to prevent `Drop` implementations like
//! // before, instead we implement `Drop` here and delegate to `PinnedDrop`.
@@ -401,7 +412,7 @@
//! #[pinned_drop]
//! impl PinnedDrop for Foo {
//! fn drop(self: Pin<&mut Self>) {
-//! println!("{self:p} is getting dropped.");
+//! pr_info!("{self:p} is getting dropped.");
//! }
//! }
//! ```
@@ -412,7 +423,7 @@
//! // `unsafe`, full path and the token parameter are added, everything else stays the same.
//! unsafe impl ::kernel::init::PinnedDrop for Foo {
//! fn drop(self: Pin<&mut Self>, _: ::kernel::init::__internal::OnlyCallFromDrop) {
-//! println!("{self:p} is getting dropped.");
+//! pr_info!("{self:p} is getting dropped.");
//! }
//! }
//! ```
@@ -447,18 +458,21 @@
//! >(data, move |slot| {
//! {
//! struct __InitOk;
-//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).a), a) };
-//! let a = &unsafe {
+//! {
+//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).a), a) };
+//! }
+//! let a = unsafe {
//! ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).a))
//! };
-//! let b = Bar::new(36);
+//! let init = Bar::new(36);
-//! unsafe { data.b(::core::addr_of_mut!((*slot).b), b)? };
+//! unsafe { data.b(::core::addr_of_mut!((*slot).b), init)? };
-//! let b = &unsafe {
+//! let b = unsafe {
//! ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).b))
//! };
-//!
+//! ::core::mem::forget(b);
+//! ::core::mem::forget(a);
//! #[allow(unreachable_code, clippy::diverging_sub_expression)]
-//! if false {
+//! let _ = || {
//! unsafe {
//! ::core::ptr::write(
//! slot,
@@ -468,13 +482,13 @@
//! },
//! );
//! };
-//! }
-//! unsafe { ::kernel::init::__internal::DropGuard::forget(a) };
-//! unsafe { ::kernel::init::__internal::DropGuard::forget(b) };
+//! };
//! }
//! Ok(__InitOk)
//! });
-//! let init = move |slot| -> ::core::result::Result<(), ::core::convert::Infallible> {
+//! let init = move |
+//! slot,
+//! | -> ::core::result::Result<(), ::core::convert::Infallible> {
//! init(slot).map(|__InitOk| ())
//! };
//! let init = unsafe {
@@ -960,6 +974,7 @@ macro_rules! __pin_data {
where $($whr)*
{
$(
+ $(#[$($p_attr)*])*
$pvis unsafe fn $p_field<E>(
self,
slot: *mut $p_type,
@@ -969,6 +984,7 @@ macro_rules! __pin_data {
}
)*
$(
+ $(#[$($attr)*])*
$fvis unsafe fn $field<E>(
self,
slot: *mut $type,
@@ -980,3 +996,388 @@ macro_rules! __pin_data {
}
};
}
+
+/// The internal init macro. Do not call manually!
+///
+/// This is called by the `{try_}{pin_}init!` macros with various inputs.
+///
+/// This macro has multiple internal call configurations; the configuration is always the very first ident:
+/// - nothing: this is the base case, called by the `{try_}{pin_}init!` macros.
+/// - `with_update_parsed`: when the `..Zeroable::zeroed()` syntax has been handled.
+/// - `init_slot`: recursively creates the code that initializes all fields in `slot`.
+/// - `make_initializer`: recursively creates the struct initializer that guarantees that every
+/// field has been initialized exactly once.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __init_internal {
+ (
+ @this($($this:ident)?),
+ @typ($t:path),
+ @fields($($fields:tt)*),
+ @error($err:ty),
+ // Either `PinData` or `InitData`, `$use_data` should only be present in the `PinData`
+ // case.
+ @data($data:ident, $($use_data:ident)?),
+ // `HasPinData` or `HasInitData`.
+ @has_data($has_data:ident, $get_data:ident),
+ // `pin_init_from_closure` or `init_from_closure`.
+ @construct_closure($construct_closure:ident),
+ @munch_fields(),
+ ) => {
+ $crate::__init_internal!(with_update_parsed:
+ @this($($this)?),
+ @typ($t),
+ @fields($($fields)*),
+ @error($err),
+ @data($data, $($use_data)?),
+ @has_data($has_data, $get_data),
+ @construct_closure($construct_closure),
+ @zeroed(), // Nothing means default behavior.
+ )
+ };
+ (
+ @this($($this:ident)?),
+ @typ($t:path),
+ @fields($($fields:tt)*),
+ @error($err:ty),
+ // Either `PinData` or `InitData`, `$use_data` should only be present in the `PinData`
+ // case.
+ @data($data:ident, $($use_data:ident)?),
+ // `HasPinData` or `HasInitData`.
+ @has_data($has_data:ident, $get_data:ident),
+ // `pin_init_from_closure` or `init_from_closure`.
+ @construct_closure($construct_closure:ident),
+ @munch_fields(..Zeroable::zeroed()),
+ ) => {
+ $crate::__init_internal!(with_update_parsed:
+ @this($($this)?),
+ @typ($t),
+ @fields($($fields)*),
+ @error($err),
+ @data($data, $($use_data)?),
+ @has_data($has_data, $get_data),
+ @construct_closure($construct_closure),
+ @zeroed(()), // `()` means zero all fields not mentioned.
+ )
+ };
+ (
+ @this($($this:ident)?),
+ @typ($t:path),
+ @fields($($fields:tt)*),
+ @error($err:ty),
+ // Either `PinData` or `InitData`, `$use_data` should only be present in the `PinData`
+ // case.
+ @data($data:ident, $($use_data:ident)?),
+ // `HasPinData` or `HasInitData`.
+ @has_data($has_data:ident, $get_data:ident),
+ // `pin_init_from_closure` or `init_from_closure`.
+ @construct_closure($construct_closure:ident),
+ @munch_fields($ignore:tt $($rest:tt)*),
+ ) => {
+ $crate::__init_internal!(
+ @this($($this)?),
+ @typ($t),
+ @fields($($fields)*),
+ @error($err),
+ @data($data, $($use_data)?),
+ @has_data($has_data, $get_data),
+ @construct_closure($construct_closure),
+ @munch_fields($($rest)*),
+ )
+ };
+ (with_update_parsed:
+ @this($($this:ident)?),
+ @typ($t:path),
+ @fields($($fields:tt)*),
+ @error($err:ty),
+ // Either `PinData` or `InitData`, `$use_data` should only be present in the `PinData`
+ // case.
+ @data($data:ident, $($use_data:ident)?),
+ // `HasPinData` or `HasInitData`.
+ @has_data($has_data:ident, $get_data:ident),
+ // `pin_init_from_closure` or `init_from_closure`.
+ @construct_closure($construct_closure:ident),
+ @zeroed($($init_zeroed:expr)?),
+ ) => {{
+ // We do not want to allow arbitrary returns, so we declare this type as the `Ok` return
+ // type and shadow it later when we insert the arbitrary user code. That way there will be
+ // no possibility of returning without `unsafe`.
+ struct __InitOk;
+ // Get the data about fields from the supplied type.
+ let data = unsafe {
+ use $crate::init::__internal::$has_data;
+ // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal
+ // information that is associated with already parsed fragments, so a path fragment
+ // cannot be used in this position. Doing the retokenization results in valid Rust
+ // code.
+ ::kernel::macros::paste!($t::$get_data())
+ };
+ // Ensure that `data` really is of type `$data` and help with type inference:
+ let init = $crate::init::__internal::$data::make_closure::<_, __InitOk, $err>(
+ data,
+ move |slot| {
+ {
+ // Shadow the structure so it cannot be used to return early.
+ struct __InitOk;
+ // If `$init_zeroed` is present we should zero the slot now and not emit an
+ // error when fields are missing (since they will be zeroed). We also have to
+ // check that the type actually implements `Zeroable`.
+ $({
+ fn assert_zeroable<T: $crate::init::Zeroable>(_: *mut T) {}
+ // Ensure that the struct is indeed `Zeroable`.
+ assert_zeroable(slot);
+ // SAFETY: The type implements `Zeroable` by the check above.
+ unsafe { ::core::ptr::write_bytes(slot, 0, 1) };
+ $init_zeroed // This will be `()` if set.
+ })?
+ // Create the `this` so it can be referenced by the user inside of the
+ // expressions creating the individual fields.
+ $(let $this = unsafe { ::core::ptr::NonNull::new_unchecked(slot) };)?
+ // Initialize every field.
+ $crate::__init_internal!(init_slot($($use_data)?):
+ @data(data),
+ @slot(slot),
+ @guards(),
+ @munch_fields($($fields)*,),
+ );
+ // We use unreachable code to ensure that all fields have been mentioned exactly
+ // once; this struct initializer will still be type-checked and will complain with a
+ // very natural error message if a field is forgotten or mentioned more than once.
+ #[allow(unreachable_code, clippy::diverging_sub_expression)]
+ let _ = || {
+ $crate::__init_internal!(make_initializer:
+ @slot(slot),
+ @type_name($t),
+ @munch_fields($($fields)*,),
+ @acc(),
+ );
+ };
+ }
+ Ok(__InitOk)
+ }
+ );
+ let init = move |slot| -> ::core::result::Result<(), $err> {
+ init(slot).map(|__InitOk| ())
+ };
+ let init = unsafe { $crate::init::$construct_closure::<_, $err>(init) };
+ init
+ }};
+ (init_slot($($use_data:ident)?):
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ @munch_fields($(..Zeroable::zeroed())? $(,)?),
+ ) => {
+ // Endpoint of munching, no fields are left. If execution reaches this point, all fields
+ // have been initialized. Therefore we can now dismiss the guards by forgetting them.
+ $(::core::mem::forget($guards);)*
+ };
+ (init_slot($use_data:ident): // `use_data` is present, so we use the `data` to init fields.
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // In-place initialization syntax.
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ ) => {
+ let init = $val;
+ // Call the initializer.
+ //
+ // SAFETY: `slot` is valid, because we are inside of an initializer closure, we
+ // return when an error/panic occurs.
+ // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
+ unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ ::kernel::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [<$field>] = unsafe {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot($use_data):
+ @data($data),
+ @slot($slot),
+ @guards([<$field>], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot(): // No `use_data`, so we use `Init::__init` directly.
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // In-place initialization syntax.
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ ) => {
+ let init = $val;
+ // Call the initializer.
+ //
+ // SAFETY: `slot` is valid, because we are inside of an initializer closure, we
+ // return when an error/panic occurs.
+ unsafe { $crate::init::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ ::kernel::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [<$field>] = unsafe {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([<$field>], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($($use_data:ident)?):
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ ::kernel::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [<$field>] = unsafe {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot($($use_data)?):
+ @data($data),
+ @slot($slot),
+ @guards([<$field>], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:path),
+ @munch_fields(..Zeroable::zeroed() $(,)?),
+ @acc($($acc:tt)*),
+ ) => {
+ // Endpoint, nothing more to munch, create the initializer. Since the user specified
+ // `..Zeroable::zeroed()`, the slot will already have been zeroed and all fields that have
+ // not been overwritten are thus zeroed and initialized. We still check that all fields are
+ // actually accessible by using the struct update syntax ourselves.
+ // We are inside of a closure that is never executed and thus we can abuse `slot` to
+ // get the correct type inference here:
+ #[allow(unused_assignments)]
+ unsafe {
+ let mut zeroed = ::core::mem::zeroed();
+ // We have to use type inference here to make zeroed have the correct type. This does
+ // not get executed, so it has no effect.
+ ::core::ptr::write($slot, zeroed);
+ zeroed = ::core::mem::zeroed();
+ // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal
+ // information that is associated with already parsed fragments, so a path fragment
+ // cannot be used in this position. Doing the retokenization results in valid Rust
+ // code.
+ ::kernel::macros::paste!(
+ ::core::ptr::write($slot, $t {
+ $($acc)*
+ ..zeroed
+ });
+ );
+ }
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:path),
+ @munch_fields($(,)?),
+ @acc($($acc:tt)*),
+ ) => {
+ // Endpoint, nothing more to munch, create the initializer.
+ // Since we are in the closure that is never called, this will never get executed.
+ // We abuse `slot` to get the correct type inference here:
+ unsafe {
+ // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal
+ // information that is associated with already parsed fragments, so a path fragment
+ // cannot be used in this position. Doing the retokenization results in valid Rust
+ // code.
+ ::kernel::macros::paste!(
+ ::core::ptr::write($slot, $t {
+ $($acc)*
+ });
+ );
+ }
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:path),
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ @acc($($acc:tt)*),
+ ) => {
+ $crate::__init_internal!(make_initializer:
+ @slot($slot),
+ @type_name($t),
+ @munch_fields($($rest)*),
+ @acc($($acc)* $field: ::core::panic!(),),
+ );
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:path),
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ @acc($($acc:tt)*),
+ ) => {
+ $crate::__init_internal!(make_initializer:
+ @slot($slot),
+ @type_name($t),
+ @munch_fields($($rest)*),
+ @acc($($acc)* $field: ::core::panic!(),),
+ );
+ };
+}
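
Tying this back to the entry points rewired earlier in the patch: a call such as `try_pin_init!(Foo { a <- A::new(), b: 0 }? Error)` now forwards here roughly as follows (identifiers abridged, illustrative only):

    __init_internal!(
        @this(),
        @typ(Foo),
        @fields(a <- A::new(), b: 0),
        @error(Error),
        @data(PinData, use_data),
        @has_data(HasPinData, __pin_data),
        @construct_closure(pin_init_from_closure),
        @munch_fields(a <- A::new(), b: 0),
    );
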
+
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __derive_zeroable {
+ (parse_input:
+ @sig(
+ $(#[$($struct_attr:tt)*])*
+ $vis:vis struct $name:ident
+ $(where $($whr:tt)*)?
+ ),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @body({
+ $(
+ $(#[$($field_attr:tt)*])*
+ $field:ident : $field_ty:ty
+ ),* $(,)?
+ }),
+ ) => {
+ // SAFETY: Every field type implements `Zeroable` and padding bytes may be zero.
+ #[automatically_derived]
+ unsafe impl<$($impl_generics)*> $crate::init::Zeroable for $name<$($ty_generics)*>
+ where
+ $($($whr)*)?
+ {}
+ const _: () = {
+ fn assert_zeroable<T: ?::core::marker::Sized + $crate::init::Zeroable>() {}
+ fn ensure_zeroable<$($impl_generics)*>()
+ where $($($whr)*)?
+ {
+ $(assert_zeroable::<$field_ty>();)*
+ }
+ };
+ };
+}
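
For a concrete picture of what this emits, take the `DriverData` struct from the `derive(Zeroable)` documentation later in this patch; the expansion is roughly (attributes omitted):

    unsafe impl ::kernel::init::Zeroable for DriverData {}
    const _: () = {
        fn assert_zeroable<T: ?::core::marker::Sized + ::kernel::init::Zeroable>() {}
        fn ensure_zeroable() {
            // One check per field type:
            assert_zeroable::<i64>();
            assert_zeroable::<*mut u8>();
            assert_zeroable::<usize>();
        }
    };
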
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 3642cadc34b1a2..e8811700239aaf 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -95,7 +95,4 @@ fn panic(info: &core::panic::PanicInfo<'_>) -> ! {
pr_emerg!("{}\n", info);
// SAFETY: FFI call.
unsafe { bindings::BUG() };
- // Bindgen currently does not recognize `__noreturn` so `BUG` returns `()`
- // instead of `!`. See <https://github.com/rust-lang/rust-bindgen/issues/2094>.
- loop {}
}
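
The trailing `loop {}` can go because, per the removed comment, it only existed to paper over bindgen's old handling of `__noreturn`; with the bindgen upgrade in this series `BUG()` is bound as a diverging function, so the call alone satisfies the handler's `-> !` signature. The binding now looks roughly like:

    extern "C" {
        pub fn BUG() -> !;
    }
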
diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
index c28587d68ebc1a..ae21600970b395 100644
--- a/rust/kernel/prelude.rs
+++ b/rust/kernel/prelude.rs
@@ -18,7 +18,7 @@ pub use core::pin::Pin;
pub use alloc::{boxed::Box, vec::Vec};
#[doc(no_inline)]
-pub use macros::{module, pin_data, pinned_drop, vtable};
+pub use macros::{module, pin_data, pinned_drop, vtable, Zeroable};
pub use super::build_assert;
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index a2216325632dff..70a785f047546e 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -72,8 +72,8 @@ pub unsafe trait Backend {
/// A mutual exclusion primitive.
///
-/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock backend
-/// specified as the generic parameter `B`.
+/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
+/// [`Backend`] specified as the generic parameter `B`.
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
/// The kernel lock object.
@@ -126,7 +126,7 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
/// A lock guard.
///
-/// Allows mutual exclusion primitives that implement the `Backend` trait to automatically unlock
+/// Allows mutual exclusion primitives that implement the [`Backend`] trait to automatically unlock
/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
/// protected by the lock.
#[must_use = "the lock unlocks immediately when the guard is unused"]
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index e33b901ab0a47f..fdb778e65d79d3 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -6,7 +6,7 @@ use crate::init::{self, PinInit};
use alloc::boxed::Box;
use core::{
cell::UnsafeCell,
- marker::PhantomData,
+ marker::{PhantomData, PhantomPinned},
mem::MaybeUninit,
ops::{Deref, DerefMut},
ptr::NonNull,
@@ -206,17 +206,26 @@ impl<T, F: FnOnce(T)> Drop for ScopeGuard<T, F> {
///
/// This is meant to be used with FFI objects that are never interpreted by Rust code.
#[repr(transparent)]
-pub struct Opaque<T>(MaybeUninit<UnsafeCell<T>>);
+pub struct Opaque<T> {
+ value: UnsafeCell<MaybeUninit<T>>,
+ _pin: PhantomPinned,
+}
impl<T> Opaque<T> {
/// Creates a new opaque value.
pub const fn new(value: T) -> Self {
- Self(MaybeUninit::new(UnsafeCell::new(value)))
+ Self {
+ value: UnsafeCell::new(MaybeUninit::new(value)),
+ _pin: PhantomPinned,
+ }
}
/// Creates an uninitialised value.
pub const fn uninit() -> Self {
- Self(MaybeUninit::uninit())
+ Self {
+ value: UnsafeCell::new(MaybeUninit::uninit()),
+ _pin: PhantomPinned,
+ }
}
/// Creates a pin-initializer from the given initializer closure.
@@ -240,7 +249,7 @@ impl<T> Opaque<T> {
/// Returns a raw pointer to the opaque data.
pub fn get(&self) -> *mut T {
- UnsafeCell::raw_get(self.0.as_ptr())
+ UnsafeCell::get(&self.value).cast::<T>()
}
/// Gets the value behind `this`.
@@ -248,7 +257,7 @@ impl<T> Opaque<T> {
/// This function is useful to get access to the value without creating intermediate
/// references.
pub const fn raw_get(this: *const Self) -> *mut T {
- UnsafeCell::raw_get(this.cast::<UnsafeCell<T>>())
+ UnsafeCell::raw_get(this.cast::<UnsafeCell<MaybeUninit<T>>>()).cast::<T>()
}
}
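
A quick standalone check of the new representation: `repr(transparent)` plus the zero-sized `PhantomPinned` keeps `Opaque<T>` layout-identical to `T` while making it `!Unpin` (sketch outside the kernel):

    use core::{cell::UnsafeCell, marker::PhantomPinned, mem::MaybeUninit};

    #[repr(transparent)]
    struct Opaque<T> {
        value: UnsafeCell<MaybeUninit<T>>,
        _pin: PhantomPinned, // zero-sized; only removes `Unpin`
    }

    const _: () = {
        assert!(core::mem::size_of::<Opaque<u64>>() == core::mem::size_of::<u64>());
        assert!(core::mem::align_of::<Opaque<u64>>() == core::mem::align_of::<u64>());
    };
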
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index 3fc74cb4ea1903..c42105c2ff9635 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -7,9 +7,11 @@ mod quote;
mod concat_idents;
mod helpers;
mod module;
+mod paste;
mod pin_data;
mod pinned_drop;
mod vtable;
+mod zeroable;
use proc_macro::TokenStream;
@@ -246,3 +248,118 @@ pub fn pin_data(inner: TokenStream, item: TokenStream) -> TokenStream {
pub fn pinned_drop(args: TokenStream, input: TokenStream) -> TokenStream {
pinned_drop::pinned_drop(args, input)
}
+
+/// Paste identifiers together.
+///
+/// Within the `paste!` macro, identifiers inside `[<` and `>]` are concatenated together to form a
+/// single identifier.
+///
+/// This is similar to the [`paste`] crate, but with the pasting feature limited to identifiers
+/// (literals, lifetimes and documentation strings are not supported). There is a difference in
+/// supported modifiers as well.
+///
+/// # Example
+///
+/// ```ignore
+/// use kernel::macros::paste;
+///
+/// macro_rules! pub_no_prefix {
+/// ($prefix:ident, $($newname:ident),+) => {
+/// paste! {
+/// $(pub(crate) const $newname: u32 = [<$prefix $newname>];)+
+/// }
+/// };
+/// }
+///
+/// pub_no_prefix!(
+/// binder_driver_return_protocol_,
+/// BR_OK,
+/// BR_ERROR,
+/// BR_TRANSACTION,
+/// BR_REPLY,
+/// BR_DEAD_REPLY,
+/// BR_TRANSACTION_COMPLETE,
+/// BR_INCREFS,
+/// BR_ACQUIRE,
+/// BR_RELEASE,
+/// BR_DECREFS,
+/// BR_NOOP,
+/// BR_SPAWN_LOOPER,
+/// BR_DEAD_BINDER,
+/// BR_CLEAR_DEATH_NOTIFICATION_DONE,
+/// BR_FAILED_REPLY
+/// );
+///
+/// assert_eq!(BR_OK, binder_driver_return_protocol_BR_OK);
+/// ```
+///
+/// # Modifiers
+///
+/// For each identifier, it is possible to attach one or more modifiers to
+/// it.
+///
+/// Currently supported modifiers are:
+/// * `span`: change the span of the concatenated identifier to the span of the specified token. By
+/// default the span of the `[< >]` group is used.
+/// * `lower`: change the identifier to lower case.
+/// * `upper`: change the identifier to upper case.
+///
+/// ```ignore
+/// use kernel::macros::paste;
+///
+/// macro_rules! pub_no_prefix {
+/// ($prefix:ident, $($newname:ident),+) => {
+/// kernel::macros::paste! {
+/// $(pub(crate) const fn [<$newname:lower:span>]() -> u32 { [<$prefix $newname:span>] })+
+/// }
+/// };
+/// }
+///
+/// pub_no_prefix!(
+/// binder_driver_return_protocol_,
+/// BR_OK,
+/// BR_ERROR,
+/// BR_TRANSACTION,
+/// BR_REPLY,
+/// BR_DEAD_REPLY,
+/// BR_TRANSACTION_COMPLETE,
+/// BR_INCREFS,
+/// BR_ACQUIRE,
+/// BR_RELEASE,
+/// BR_DECREFS,
+/// BR_NOOP,
+/// BR_SPAWN_LOOPER,
+/// BR_DEAD_BINDER,
+/// BR_CLEAR_DEATH_NOTIFICATION_DONE,
+/// BR_FAILED_REPLY
+/// );
+///
+/// assert_eq!(br_ok(), binder_driver_return_protocol_BR_OK);
+/// ```
+///
+/// [`paste`]: https://docs.rs/paste/
+#[proc_macro]
+pub fn paste(input: TokenStream) -> TokenStream {
+ let mut tokens = input.into_iter().collect();
+ paste::expand(&mut tokens);
+ tokens.into_iter().collect()
+}
+
+/// Derives the [`Zeroable`] trait for the given struct.
+///
+/// This can only be used for structs where every field implements the [`Zeroable`] trait.
+///
+/// # Examples
+///
+/// ```rust,ignore
+/// #[derive(Zeroable)]
+/// pub struct DriverData {
+/// id: i64,
+/// buf_ptr: *mut u8,
+/// len: usize,
+/// }
+/// ```
+#[proc_macro_derive(Zeroable)]
+pub fn derive_zeroable(input: TokenStream) -> TokenStream {
+ zeroable::derive(input)
+}
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index fb1244f8c2e694..d62d8710d77ab0 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -199,7 +199,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
/// Used by the printing macros, e.g. [`info!`].
const __LOG_PREFIX: &[u8] = b\"{name}\\0\";
- /// The \"Rust loadable module\" mark, for `scripts/is_rust_module.sh`.
+ /// The \"Rust loadable module\" mark.
//
// This may be best done another way later on, e.g. as a new modinfo
// key or a new section. For the moment, keep it simple.
diff --git a/rust/macros/paste.rs b/rust/macros/paste.rs
new file mode 100644
index 00000000000000..385a78434224f6
--- /dev/null
+++ b/rust/macros/paste.rs
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use proc_macro::{Delimiter, Group, Ident, Spacing, Span, TokenTree};
+
+fn concat(tokens: &[TokenTree], group_span: Span) -> TokenTree {
+ let mut tokens = tokens.iter();
+ let mut segments = Vec::new();
+ let mut span = None;
+ loop {
+ match tokens.next() {
+ None => break,
+ Some(TokenTree::Literal(lit)) => segments.push((lit.to_string(), lit.span())),
+ Some(TokenTree::Ident(ident)) => {
+ let mut value = ident.to_string();
+ if value.starts_with("r#") {
+ value.replace_range(0..2, "");
+ }
+ segments.push((value, ident.span()));
+ }
+ Some(TokenTree::Punct(p)) if p.as_char() == ':' => {
+ let Some(TokenTree::Ident(ident)) = tokens.next() else {
+ panic!("expected identifier as modifier");
+ };
+
+ let (mut value, sp) = segments.pop().expect("expected identifier before modifier");
+ match ident.to_string().as_str() {
+ // Set the overall span of the concatenated token to this segment's span
+ "span" => {
+ assert!(
+ span.is_none(),
+ "span modifier should only appear at most once"
+ );
+ span = Some(sp);
+ }
+ "lower" => value = value.to_lowercase(),
+ "upper" => value = value.to_uppercase(),
+ v => panic!("unknown modifier `{v}`"),
+ };
+ segments.push((value, sp));
+ }
+ _ => panic!("unexpected token in paste segments"),
+ };
+ }
+
+ let pasted: String = segments.into_iter().map(|x| x.0).collect();
+ TokenTree::Ident(Ident::new(&pasted, span.unwrap_or(group_span)))
+}
+
+pub(crate) fn expand(tokens: &mut Vec<TokenTree>) {
+ for token in tokens.iter_mut() {
+ if let TokenTree::Group(group) = token {
+ let delimiter = group.delimiter();
+ let span = group.span();
+ let mut stream: Vec<_> = group.stream().into_iter().collect();
+ // Find groups that look like `[< A B C D >]`
+ if delimiter == Delimiter::Bracket
+ && stream.len() >= 3
+ && matches!(&stream[0], TokenTree::Punct(p) if p.as_char() == '<')
+ && matches!(&stream[stream.len() - 1], TokenTree::Punct(p) if p.as_char() == '>')
+ {
+ // Replace the group with concatenated token
+ *token = concat(&stream[1..stream.len() - 1], span);
+ } else {
+ // Recursively expand tokens inside the group
+ expand(&mut stream);
+ let mut group = Group::new(delimiter, stream.into_iter().collect());
+ group.set_span(span);
+ *token = TokenTree::Group(group);
+ }
+ }
+ }
+
+ // Path segments cannot contain invisible delimiter groups, so remove them if there are any.
+ for i in (0..tokens.len().saturating_sub(3)).rev() {
+ // Looking for a double colon
+ if matches!(
+ (&tokens[i + 1], &tokens[i + 2]),
+ (TokenTree::Punct(a), TokenTree::Punct(b))
+ if a.as_char() == ':' && a.spacing() == Spacing::Joint && b.as_char() == ':'
+ ) {
+ match &tokens[i + 3] {
+ TokenTree::Group(group) if group.delimiter() == Delimiter::None => {
+ tokens.splice(i + 3..i + 4, group.stream());
+ }
+ _ => (),
+ }
+
+ match &tokens[i] {
+ TokenTree::Group(group) if group.delimiter() == Delimiter::None => {
+ tokens.splice(i..i + 1, group.stream());
+ }
+ _ => (),
+ }
+ }
+ }
+}
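
A small demo of the path through `concat` above (the `make_getter!` macro and the `STATUS` name are made up for illustration): the `:lower` modifier rewrites the segment, and the pasted identifier takes the span of the `[< >]` group unless `:span` overrides it.

    macro_rules! make_getter {
        ($name:ident) => {
            ::kernel::macros::paste! {
                pub(crate) fn [<get_ $name:lower>]() -> u32 { 0 }
            }
        };
    }

    make_getter!(STATUS); // expands to `pub(crate) fn get_status() -> u32 { 0 }`
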
diff --git a/rust/macros/quote.rs b/rust/macros/quote.rs
index dddbb4e6f4cb88..33a199e4f17691 100644
--- a/rust/macros/quote.rs
+++ b/rust/macros/quote.rs
@@ -124,6 +124,18 @@ macro_rules! quote_spanned {
));
quote_spanned!(@proc $v $span $($tt)*);
};
+ (@proc $v:ident $span:ident ; $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Punct(
+ ::proc_macro::Punct::new(';', ::proc_macro::Spacing::Alone)
+ ));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident + $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Punct(
+ ::proc_macro::Punct::new('+', ::proc_macro::Spacing::Alone)
+ ));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
(@proc $v:ident $span:ident $id:ident $($tt:tt)*) => {
$v.push(::proc_macro::TokenTree::Ident(::proc_macro::Ident::new(stringify!($id), $span)));
quote_spanned!(@proc $v $span $($tt)*);
diff --git a/rust/macros/zeroable.rs b/rust/macros/zeroable.rs
new file mode 100644
index 00000000000000..0d605c46ab3b40
--- /dev/null
+++ b/rust/macros/zeroable.rs
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::helpers::{parse_generics, Generics};
+use proc_macro::{TokenStream, TokenTree};
+
+pub(crate) fn derive(input: TokenStream) -> TokenStream {
+ let (
+ Generics {
+ impl_generics,
+ ty_generics,
+ },
+ mut rest,
+ ) = parse_generics(input);
+ // This should be the body of the struct `{...}`.
+ let last = rest.pop();
+ // Now we insert `Zeroable` as a bound for every generic parameter in `impl_generics`.
+ let mut new_impl_generics = Vec::with_capacity(impl_generics.len());
+ // Are we inside of a generic where we want to add `Zeroable`?
+ let mut in_generic = !impl_generics.is_empty();
+ // Have we already inserted `Zeroable`?
+ let mut inserted = false;
+ // Level of `<>` nestings.
+ let mut nested = 0;
+ for tt in impl_generics {
+ match &tt {
+ // If we find a `,`, then we have finished a generic/constant/lifetime parameter.
+ TokenTree::Punct(p) if nested == 0 && p.as_char() == ',' => {
+ if in_generic && !inserted {
+ new_impl_generics.extend(quote! { : ::kernel::init::Zeroable });
+ }
+ in_generic = true;
+ inserted = false;
+ new_impl_generics.push(tt);
+ }
+ // If we find `'`, then we are entering a lifetime.
+ TokenTree::Punct(p) if nested == 0 && p.as_char() == '\'' => {
+ in_generic = false;
+ new_impl_generics.push(tt);
+ }
+ TokenTree::Punct(p) if nested == 0 && p.as_char() == ':' => {
+ new_impl_generics.push(tt);
+ if in_generic {
+ new_impl_generics.extend(quote! { ::kernel::init::Zeroable + });
+ inserted = true;
+ }
+ }
+ TokenTree::Punct(p) if p.as_char() == '<' => {
+ nested += 1;
+ new_impl_generics.push(tt);
+ }
+ TokenTree::Punct(p) if p.as_char() == '>' => {
+ assert!(nested > 0);
+ nested -= 1;
+ new_impl_generics.push(tt);
+ }
+ _ => new_impl_generics.push(tt),
+ }
+ }
+ assert_eq!(nested, 0);
+ if in_generic && !inserted {
+ new_impl_generics.extend(quote! { : ::kernel::init::Zeroable });
+ }
+ quote! {
+ ::kernel::__derive_zeroable!(
+ parse_input:
+ @sig(#(#rest)*),
+ @impl_generics(#(#new_impl_generics)*),
+ @ty_generics(#(#ty_generics)*),
+ @body(#last),
+ );
+ }
+}
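
To summarize the bound insertion (hypothetical input, abridged output): lifetimes are skipped, parameters that already have a `:` bound get `Zeroable +` prepended to their bounds, and unbounded parameters get `: Zeroable` appended when their trailing `,` (or the end of the list) is reached.

    // Input:
    //     #[derive(Zeroable)]
    //     struct Wrapper<'a, T, U: Copy> { /* ... */ }
    //
    // Rewritten impl generics handed to `__derive_zeroable!` (roughly):
    //     'a, T: ::kernel::init::Zeroable, U: ::kernel::init::Zeroable + Copy
    //
    // Resulting impl (roughly):
    //     unsafe impl<'a, T: ::kernel::init::Zeroable, U: ::kernel::init::Zeroable + Copy>
    //         ::kernel::init::Zeroable for Wrapper<'a, T, U> {}
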