kernel/sync/atomic/internal.rs
// SPDX-License-Identifier: GPL-2.0

//! Atomic internal implementations.
//!
//! Provides 1:1 mapping to the C atomic operations.

use crate::bindings;
use crate::macros::paste;
use core::cell::UnsafeCell;

mod private {
    /// Sealed trait marker to disable customized impls on atomic implementation traits.
    pub trait Sealed {}
}

// `i32` and `i64` are the only supported atomic implementations.
impl private::Sealed for i32 {}
impl private::Sealed for i64 {}

/// A marker trait for types that implement atomic operations with C side primitives.
///
/// This trait is sealed, and only types that have a direct mapping to the C side atomics should
/// impl this:
///
/// - `i32` maps to `atomic_t`.
/// - `i64` maps to `atomic64_t`.
pub trait AtomicImpl: Sized + Send + Copy + private::Sealed {
    /// The type of the delta in arithmetic or logical operations.
    ///
    /// For example, in `atomic_add(ptr, v)`, it's the type of `v`. Usually it's the same type as
    /// [`Self`], but it may be different for the atomic pointer type.
    type Delta;
}

// `atomic_t` implements atomic operations on `i32`.
impl AtomicImpl for i32 {
    type Delta = Self;
}

// `atomic64_t` implements atomic operations on `i64`.
impl AtomicImpl for i64 {
    type Delta = Self;
}

/// Atomic representation.
#[repr(transparent)]
pub struct AtomicRepr<T: AtomicImpl>(UnsafeCell<T>);

impl<T: AtomicImpl> AtomicRepr<T> {
    /// Creates a new atomic representation of `T`.
    pub const fn new(v: T) -> Self {
        Self(UnsafeCell::new(v))
    }

    /// Returns a pointer to the underlying `T`.
    ///
    /// # Guarantees
    ///
    /// The returned pointer is valid and properly aligned (i.e. aligned to [`align_of::<T>()`]).
    pub const fn as_ptr(&self) -> *mut T {
        // GUARANTEE: `self.0` is an `UnsafeCell<T>`, therefore the pointer returned by `.get()`
        // must be valid and properly aligned.
        self.0.get()
    }
}

// This macro generates the function signature with the given argument list and return type.
macro_rules! declare_atomic_method {
    (
        $(#[doc=$doc:expr])*
        $func:ident($($arg:ident : $arg_type:ty),*) $(-> $ret:ty)?
    ) => {
        paste!(
            $(#[doc = $doc])*
            fn [< atomic_ $func >]($($arg: $arg_type,)*) $(-> $ret)?;
        );
    };
    (
        $(#[doc=$doc:expr])*
        $func:ident [$variant:ident $($rest:ident)*]($($arg_sig:tt)*) $(-> $ret:ty)?
    ) => {
        paste!(
            declare_atomic_method!(
                $(#[doc = $doc])*
                [< $func _ $variant >]($($arg_sig)*) $(-> $ret)?
            );
        );

        declare_atomic_method!(
            $(#[doc = $doc])*
            $func [$($rest)*]($($arg_sig)*) $(-> $ret)?
        );
    };
    (
        $(#[doc=$doc:expr])*
        $func:ident []($($arg_sig:tt)*) $(-> $ret:ty)?
    ) => {
        declare_atomic_method!(
            $(#[doc = $doc])*
            $func($($arg_sig)*) $(-> $ret)?
        );
    }
}
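
// Illustrative expansion (sketch only): an invocation such as
// `declare_atomic_method!(read[acquire](a: &AtomicRepr<Self>) -> Self)` recurses over the
// variant list and declares both the variant and the base method signatures:
//
//     fn atomic_read_acquire(a: &AtomicRepr<Self>) -> Self;
//     fn atomic_read(a: &AtomicRepr<Self>) -> Self;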

// This macro generates the function implementation with the given argument list and return type,
// and it will replace the `call(...)` expression with `$ctype _ $func` to call the real C
// function.
macro_rules! impl_atomic_method {
    (
        ($ctype:ident) $func:ident($($arg:ident: $arg_type:ty),*) $(-> $ret:ty)? {
            $unsafe:tt { call($($c_arg:expr),*) }
        }
    ) => {
        paste!(
            #[inline(always)]
            fn [< atomic_ $func >]($($arg: $arg_type,)*) $(-> $ret)? {
                // TODO: Ideally we want to use the SAFETY comments written at the macro
                // invocation (e.g. in `declare_and_impl_atomic_methods!()`). However, SAFETY
                // comments are just comments, and they are not passed to macros as tokens, so we
                // cannot use them here. One potential improvement is that if we support using
                // attributes as an alternative for SAFETY comments, then we can use that for
                // macro generated code.
                //
                // SAFETY: specified on macro invocation.
                $unsafe { bindings::[< $ctype _ $func >]($($c_arg,)*) }
            }
        );
    };
    (
        ($ctype:ident) $func:ident[$variant:ident $($rest:ident)*]($($arg_sig:tt)*) $(-> $ret:ty)? {
            $unsafe:tt { call($($arg:tt)*) }
        }
    ) => {
        paste!(
            impl_atomic_method!(
                ($ctype) [< $func _ $variant >]($($arg_sig)*) $( -> $ret)? {
                    $unsafe { call($($arg)*) }
                }
            );
        );
        impl_atomic_method!(
            ($ctype) $func [$($rest)*]($($arg_sig)*) $( -> $ret)? {
                $unsafe { call($($arg)*) }
            }
        );
    };
    (
        ($ctype:ident) $func:ident[]($($arg_sig:tt)*) $( -> $ret:ty)? {
            $unsafe:tt { call($($arg:tt)*) }
        }
    ) => {
        impl_atomic_method!(
            ($ctype) $func($($arg_sig)*) $(-> $ret)? {
                $unsafe { call($($arg)*) }
            }
        );
    }
}
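
// Illustrative expansion (sketch only): with the `(atomic)` prefix, an invocation like
// `impl_atomic_method!((atomic) read[acquire](...) -> Self { ... })` generates bodies for
// `atomic_read_acquire()` and `atomic_read()` that call `bindings::atomic_read_acquire()` and
// `bindings::atomic_read()` respectively; with `(atomic64)` the calls go to the corresponding
// `atomic64_*()` bindings instead.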

// Declares the `$ops` trait with methods and implements the trait for `i32` and `i64`.
macro_rules! declare_and_impl_atomic_methods {
    ($(#[$attr:meta])* $pub:vis trait $ops:ident {
        $(
            $(#[doc=$doc:expr])*
            fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? {
                $unsafe:tt { bindings::#call($($arg:tt)*) }
            }
        )*
    }) => {
        $(#[$attr])*
        $pub trait $ops: AtomicImpl {
            $(
                declare_atomic_method!(
                    $(#[doc=$doc])*
                    $func[$($variant)*]($($arg_sig)*) $(-> $ret)?
                );
            )*
        }

        impl $ops for i32 {
            $(
                impl_atomic_method!(
                    (atomic) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? {
                        $unsafe { call($($arg)*) }
                    }
                );
            )*
        }

        impl $ops for i64 {
            $(
                impl_atomic_method!(
                    (atomic64) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? {
                        $unsafe { call($($arg)*) }
                    }
                );
            )*
        }
    }
}

declare_and_impl_atomic_methods!(
    /// Basic atomic operations
    pub trait AtomicBasicOps {
        /// Atomic read (load).
        fn read[acquire](a: &AtomicRepr<Self>) -> Self {
            // SAFETY: `a.as_ptr()` is valid and properly aligned.
            unsafe { bindings::#call(a.as_ptr().cast()) }
        }

        /// Atomic set (store).
        fn set[release](a: &AtomicRepr<Self>, v: Self) {
            // SAFETY: `a.as_ptr()` is valid and properly aligned.
            unsafe { bindings::#call(a.as_ptr().cast(), v) }
        }
    }
);

declare_and_impl_atomic_methods!(
    /// Exchange and compare-and-exchange atomic operations
    pub trait AtomicExchangeOps {
        /// Atomic exchange.
        ///
        /// Atomically updates `*a` to `v` and returns the old value.
        fn xchg[acquire, release, relaxed](a: &AtomicRepr<Self>, v: Self) -> Self {
            // SAFETY: `a.as_ptr()` is valid and properly aligned.
            unsafe { bindings::#call(a.as_ptr().cast(), v) }
        }

        /// Atomic compare and exchange.
        ///
        /// If `*a` == `*old`, atomically updates `*a` to `new`. Otherwise, `*a` is not
        /// modified, and `*old` is updated to the current value of `*a`.
        ///
        /// Returns `true` if the update of `*a` occurred, `false` otherwise.
        fn try_cmpxchg[acquire, release, relaxed](
            a: &AtomicRepr<Self>, old: &mut Self, new: Self
        ) -> bool {
            // SAFETY: `a.as_ptr()` is valid and properly aligned. `core::ptr::from_mut(old)`
            // is valid and properly aligned.
            unsafe { bindings::#call(a.as_ptr().cast(), core::ptr::from_mut(old), new) }
        }
    }
);

declare_and_impl_atomic_methods!(
    /// Atomic arithmetic operations
    pub trait AtomicArithmeticOps {
        /// Atomic add (wrapping).
        ///
        /// Atomically updates `*a` to `(*a).wrapping_add(v)`.
        fn add[](a: &AtomicRepr<Self>, v: Self::Delta) {
            // SAFETY: `a.as_ptr()` is valid and properly aligned.
            unsafe { bindings::#call(v, a.as_ptr().cast()) }
        }

        /// Atomic fetch and add (wrapping).
        ///
        /// Atomically updates `*a` to `(*a).wrapping_add(v)`, and returns the value of `*a`
        /// before the update.
        fn fetch_add[acquire, release, relaxed](a: &AtomicRepr<Self>, v: Self::Delta) -> Self {
            // SAFETY: `a.as_ptr()` is valid and properly aligned.
            unsafe { bindings::#call(v, a.as_ptr().cast()) }
        }
    }
);