kernel/sync/atomic.rs
// SPDX-License-Identifier: GPL-2.0

//! Atomic primitives.
//!
//! These primitives have the same semantics as their C counterparts; the precise definitions of
//! the semantics can be found at [`LKMM`]. Note that the Linux Kernel Memory (Consistency) Model
//! is the only memory model for Rust code in the kernel, and Rust's own atomics should be avoided.
//!
//! # Data races
//!
//! [`LKMM`] atomics have different rules regarding data races:
//!
//! - A normal write from C side is treated as an atomic write if
//!   CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y.
//! - Mixed-size atomic accesses don't cause data races.
//!
//! [`LKMM`]: srctree/tools/memory-model/
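//!
//! # Examples
//!
//! A minimal usage sketch (the `COUNTER` static is illustrative; `Relaxed` suffices here because
//! nothing else is ordered against these accesses):
//!
//! ```
//! use kernel::sync::atomic::{Atomic, Relaxed};
//!
//! // `Atomic::new()` is `const`, so an atomic can live in a `static`.
//! static COUNTER: Atomic<i32> = Atomic::new(0);
//!
//! COUNTER.store(1, Relaxed);
//! assert_eq!(1, COUNTER.load(Relaxed));
//! ```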

mod internal;
pub mod ordering;
mod predefine;

pub use internal::AtomicImpl;
pub use ordering::{Acquire, Full, Relaxed, Release};
pub(crate) use internal::{AtomicArithmeticOps, AtomicBasicOps, AtomicExchangeOps};

use crate::build_error;
use internal::AtomicRepr;
use ordering::OrderingType;

/// A memory location which can be safely modified from multiple execution contexts.
///
/// This has the same size, alignment and bit validity as the underlying type `T`. It also
/// disables niche optimizations for the same reason as [`UnsafeCell`].
///
/// The atomic operations are implemented in a way that is fully compatible with the [Linux Kernel
/// Memory (Consistency) Model][LKMM], hence they should be modeled as the corresponding
/// [`LKMM`][LKMM] atomic primitives. With the help of [`Atomic::from_ptr()`] and
/// [`Atomic::as_ptr()`], this provides a way to interact with [C-side atomic operations]
/// (including those without the `atomic` prefix, e.g. `READ_ONCE()`, `WRITE_ONCE()`,
/// `smp_load_acquire()` and `smp_store_release()`).
///
/// # Invariants
///
/// `self.0` is a valid `T`.
///
/// [`UnsafeCell`]: core::cell::UnsafeCell
/// [LKMM]: srctree/tools/memory-model/
/// [C-side atomic operations]: srctree/Documentation/atomic_t.txt
#[repr(transparent)]
pub struct Atomic<T: AtomicType>(AtomicRepr<T::Repr>);

// SAFETY: `Atomic<T>` is safe to share among execution contexts because all accesses are atomic.
unsafe impl<T: AtomicType> Sync for Atomic<T> {}

/// Types that support basic atomic operations.
///
/// # Round-trip transmutability
///
/// `T` is round-trip transmutable to `U` if and only if both of these properties hold:
///
/// - Any valid bit pattern for `T` is also a valid bit pattern for `U`.
/// - Transmuting (e.g. using [`transmute()`]) a value of type `T` to `U` and then to `T` again
///   yields a value that is in all aspects equivalent to the original value.
///
/// # Safety
///
/// - [`Self`] must have the same size and alignment as [`Self::Repr`].
/// - [`Self`] must be [round-trip transmutable] to [`Self::Repr`].
///
/// Note that this is more relaxed than requiring bi-directional transmutability (i.e.
/// [`transmute()`] always being sound between `U` and `T`), because it allows atomic
/// variables over unit-only enums; see [Examples].
///
/// # Limitations
///
/// Because C primitives are used to implement the atomic operations, and a C function requires
/// a valid object of a type to operate on (i.e. no `MaybeUninit<_>`), only types with all bits
/// initialized can be passed across the Rust <-> C surface. As a result, types like `(u8, u16)`
/// (whose padding bytes are uninitialized) are currently not supported.
///
/// # Examples
///
/// A unit-only enum that implements [`AtomicType`]:
///
/// ```
/// use kernel::sync::atomic::{AtomicType, Atomic, Relaxed};
///
/// #[derive(Clone, Copy, PartialEq, Eq)]
/// #[repr(i32)]
/// enum State {
///     Uninit = 0,
///     Working = 1,
///     Done = 2,
/// }
///
/// // SAFETY: `State` and `i32` have the same size and alignment, and `State` is round-trip
/// // transmutable to `i32`.
/// unsafe impl AtomicType for State {
///     type Repr = i32;
/// }
///
/// let s = Atomic::new(State::Uninit);
///
/// assert_eq!(State::Uninit, s.load(Relaxed));
/// ```
///
/// [`transmute()`]: core::mem::transmute
/// [round-trip transmutable]: AtomicType#round-trip-transmutability
/// [Examples]: AtomicType#examples
pub unsafe trait AtomicType: Sized + Send + Copy {
    /// The backing atomic implementation type.
    type Repr: AtomicImpl;
}

/// Types that support atomic add operations.
///
/// # Safety
///
// TODO: Properly define `wrapping_add` in the following comment.
/// Adding (via `wrapping_add`) any value of type `Self::Repr::Delta` obtained by
/// [`Self::rhs_into_delta()`] to any value of type `Self::Repr` obtained through transmuting a
/// value of type `Self` must yield a value with a bit pattern also valid for `Self`.
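///
/// # Examples
///
/// A sketch of a transparent newtype supporting atomic add (the `Nanos` type is illustrative,
/// not an existing kernel type; this assumes the `Delta` type of `i64` is `i64` itself):
///
/// ```
/// use kernel::sync::atomic::{Atomic, AtomicAdd, AtomicType, Relaxed};
///
/// /// A nanosecond count.
/// #[derive(Clone, Copy)]
/// #[repr(transparent)]
/// struct Nanos(i64);
///
/// // SAFETY: `Nanos` is a transparent wrapper of `i64`, hence it has the same size and
/// // alignment, and is round-trip transmutable to `i64`.
/// unsafe impl AtomicType for Nanos {
///     type Repr = i64;
/// }
///
/// // SAFETY: Every `i64` bit pattern is a valid `Nanos`, so `wrapping_add` of any delta always
/// // yields a bit pattern valid for `Nanos`.
/// unsafe impl AtomicAdd<i64> for Nanos {
///     fn rhs_into_delta(rhs: i64) -> i64 {
///         rhs
///     }
/// }
///
/// let ns = Atomic::new(Nanos(0));
///
/// ns.add(100, Relaxed);
/// assert_eq!(100, ns.fetch_add(50, Relaxed).0);
/// ```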
pub unsafe trait AtomicAdd<Rhs = Self>: AtomicType {
    /// Converts `Rhs` into the `Delta` type of the atomic implementation.
    fn rhs_into_delta(rhs: Rhs) -> <Self::Repr as AtomicImpl>::Delta;
}

#[inline(always)]
const fn into_repr<T: AtomicType>(v: T) -> T::Repr {
    // SAFETY: Per the safety requirement of `AtomicType`, `T` is round-trip transmutable to
    // `T::Repr`, therefore the transmute operation is sound.
    unsafe { core::mem::transmute_copy(&v) }
}

/// # Safety
///
/// `r` must be a valid bit pattern of `T`.
#[inline(always)]
const unsafe fn from_repr<T: AtomicType>(r: T::Repr) -> T {
    // SAFETY: Per the safety requirement of the function, the transmute operation is sound.
    unsafe { core::mem::transmute_copy(&r) }
}

impl<T: AtomicType> Atomic<T> {
    /// Creates a new atomic `T`.
    pub const fn new(v: T) -> Self {
        // INVARIANT: Per the safety requirement of `AtomicType`, `into_repr(v)` is a valid `T`.
        Self(AtomicRepr::new(into_repr(v)))
    }

    /// Creates a reference to an atomic `T` from a pointer of `T`.
    ///
    /// This is usually used when communicating with the C side or manipulating a C struct, see
    /// examples below.
    ///
    /// # Safety
    ///
    /// - `ptr` is aligned to `align_of::<T>()`.
    /// - `ptr` is valid for reads and writes for `'a`.
    /// - For the duration of `'a`, other accesses to `*ptr` must not cause data races (defined
    ///   by [`LKMM`]) against atomic operations on the returned reference. Note that if all other
    ///   accesses are atomic, then this safety requirement is trivially fulfilled.
    ///
    /// [`LKMM`]: srctree/tools/memory-model
    ///
    /// # Examples
    ///
    /// Using [`Atomic::from_ptr()`] combined with [`Atomic::load()`] or [`Atomic::store()`] can
    /// achieve the same functionality as `READ_ONCE()`/`smp_load_acquire()` or
    /// `WRITE_ONCE()`/`smp_store_release()` on the C side:
    ///
    /// ```
    /// # use kernel::types::Opaque;
    /// use kernel::sync::atomic::{Atomic, Relaxed, Release};
    ///
    /// // Assume there is a C struct `foo`.
    /// mod cbindings {
    ///     #[repr(C)]
    ///     pub(crate) struct foo {
    ///         pub(crate) a: i32,
    ///         pub(crate) b: i32
    ///     }
    /// }
    ///
    /// let tmp = Opaque::new(cbindings::foo { a: 1, b: 2 });
    ///
    /// // struct foo *foo_ptr = ..;
    /// let foo_ptr = tmp.get();
    ///
    /// // SAFETY: `foo_ptr` is valid, and `.a` is in bounds.
    /// let foo_a_ptr = unsafe { &raw mut (*foo_ptr).a };
    ///
    /// // a = READ_ONCE(foo_ptr->a);
    /// //
    /// // SAFETY: `foo_a_ptr` is valid for reads, and all other accesses on it are atomic, so no
    /// // data race.
    /// let a = unsafe { Atomic::from_ptr(foo_a_ptr) }.load(Relaxed);
    /// # assert_eq!(a, 1);
    ///
    /// // smp_store_release(&foo_ptr->a, 2);
    /// //
    /// // SAFETY: `foo_a_ptr` is valid for writes, and all other accesses on it are atomic, so
    /// // no data race.
    /// unsafe { Atomic::from_ptr(foo_a_ptr) }.store(2, Release);
    /// ```
    pub unsafe fn from_ptr<'a>(ptr: *mut T) -> &'a Self
    where
        T: Sync,
    {
        // CAST: `T` and `Atomic<T>` have the same size, alignment and bit validity.
        // SAFETY: Per the function safety requirements, `ptr` is a valid pointer and the object
        // will live long enough. It's safe to return a `&Atomic<T>` because the function safety
        // requirements guarantee that other accesses won't cause data races.
        unsafe { &*ptr.cast::<Self>() }
    }

    /// Returns a pointer to the underlying atomic `T`.
    ///
    /// Note that use of the returned pointer must not cause data races (as defined by [`LKMM`]).
    ///
    /// # Guarantees
    ///
    /// The returned pointer is valid and properly aligned (i.e. aligned to [`align_of::<T>()`]).
    ///
    /// [`LKMM`]: srctree/tools/memory-model
    /// [`align_of::<T>()`]: core::mem::align_of
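    ///
    /// # Examples
    ///
    /// A sketch of using the returned pointer; the read below stands in for C code that would
    /// typically receive the pointer:
    ///
    /// ```
    /// use kernel::sync::atomic::Atomic;
    ///
    /// let x = Atomic::new(42i32);
    ///
    /// let p = x.as_ptr();
    ///
    /// // SAFETY: `p` is valid and properly aligned per the function guarantee, and there are no
    /// // concurrent accesses, so a plain read doesn't cause a data race.
    /// assert_eq!(42, unsafe { p.read() });
    /// ```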
    pub const fn as_ptr(&self) -> *mut T {
        // GUARANTEE: Per the function guarantee of `AtomicRepr::as_ptr()`, `self.0.as_ptr()` is a
        // valid and properly aligned pointer for `T::Repr`, and per the safety requirement of
        // `AtomicType`, it's also a valid and properly aligned pointer of `T`.
        self.0.as_ptr().cast()
    }

    /// Returns a mutable reference to the underlying atomic `T`.
    ///
    /// This is safe because the mutable reference to the atomic `T` guarantees exclusive access.
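    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let mut x = Atomic::new(42i32);
    ///
    /// // `&mut self` guarantees no concurrent accesses, so a plain read-modify-write is fine.
    /// *x.get_mut() += 1;
    ///
    /// assert_eq!(43, x.load(Relaxed));
    /// ```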
    pub fn get_mut(&mut self) -> &mut T {
        // CAST: `T` and `T::Repr` have the same size and alignment per the safety requirement
        // of `AtomicType`, and per the type invariants `self.0` is a valid `T`, therefore the
        // cast result is a valid pointer of `T`.
        // SAFETY: The pointer is valid per the CAST comment above, and the mutable reference
        // guarantees exclusive access.
        unsafe { &mut *self.0.as_ptr().cast() }
    }
}

impl<T: AtomicType> Atomic<T>
where
    T::Repr: AtomicBasicOps,
{
    /// Loads the value from the atomic `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42i32);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// let x = Atomic::new(42i64);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    /// ```
    #[doc(alias("atomic_read", "atomic64_read"))]
    #[inline(always)]
    pub fn load<Ordering: ordering::AcquireOrRelaxed>(&self, _: Ordering) -> T {
        let v = {
            match Ordering::TYPE {
                OrderingType::Relaxed => T::Repr::atomic_read(&self.0),
                OrderingType::Acquire => T::Repr::atomic_read_acquire(&self.0),
                _ => build_error!("Wrong ordering"),
            }
        };

        // SAFETY: `v` comes from reading `self.0`, which is a valid `T` per the type invariants.
        unsafe { from_repr(v) }
    }

    /// Stores a value to the atomic `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42i32);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// x.store(43, Relaxed);
    ///
    /// assert_eq!(43, x.load(Relaxed));
    /// ```
    #[doc(alias("atomic_set", "atomic64_set"))]
    #[inline(always)]
    pub fn store<Ordering: ordering::ReleaseOrRelaxed>(&self, v: T, _: Ordering) {
        let v = into_repr(v);

        // INVARIANT: `v` is a valid `T`, and is stored to `self.0` by `atomic_set*()`.
        match Ordering::TYPE {
            OrderingType::Relaxed => T::Repr::atomic_set(&self.0, v),
            OrderingType::Release => T::Repr::atomic_set_release(&self.0, v),
            _ => build_error!("Wrong ordering"),
        }
    }
}

impl<T: AtomicType + core::fmt::Debug> core::fmt::Debug for Atomic<T>
where
    T::Repr: AtomicBasicOps,
{
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        core::fmt::Debug::fmt(&self.load(Relaxed), f)
    }
}

impl<T: AtomicType> Atomic<T>
where
    T::Repr: AtomicExchangeOps,
{
    /// Atomic exchange.
    ///
    /// Atomically updates `*self` to `v` and returns the old value of `*self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Acquire, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, x.xchg(52, Acquire));
    /// assert_eq!(52, x.load(Relaxed));
    /// ```
    #[doc(alias("atomic_xchg", "atomic64_xchg", "swap"))]
    #[inline(always)]
    pub fn xchg<Ordering: ordering::Ordering>(&self, v: T, _: Ordering) -> T {
        let v = into_repr(v);

        // INVARIANT: `self.0` is a valid `T` after `atomic_xchg*()` because `v` is transmutable
        // to `T`.
        let ret = {
            match Ordering::TYPE {
                OrderingType::Full => T::Repr::atomic_xchg(&self.0, v),
                OrderingType::Acquire => T::Repr::atomic_xchg_acquire(&self.0, v),
                OrderingType::Release => T::Repr::atomic_xchg_release(&self.0, v),
                OrderingType::Relaxed => T::Repr::atomic_xchg_relaxed(&self.0, v),
            }
        };

        // SAFETY: `ret` comes from reading `*self`, which is a valid `T` per type invariants.
        unsafe { from_repr(ret) }
    }

    /// Atomic compare and exchange.
    ///
    /// If `*self` == `old`, atomically updates `*self` to `new`. Otherwise, `*self` is not
    /// modified.
    ///
    /// Compare: The comparison is done via byte-level comparison between `*self` and `old`.
    ///
    /// Ordering: On success, provides the ordering indicated by the `Ordering` type parameter. On
    /// failure, doesn't provide any ordering: the load part of a failed cmpxchg is a [`Relaxed`]
    /// load.
    ///
    /// Returns `Ok(value)` if the cmpxchg succeeds, where `value` is guaranteed to be equal to
    /// `old`; otherwise returns `Err(value)`, where `value` is the current value of `*self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Full, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// // Checks whether the cmpxchg succeeded.
    /// let success = x.cmpxchg(52, 64, Relaxed).is_ok();
    /// # assert!(!success);
    ///
    /// // Checks whether the cmpxchg failed.
    /// let failure = x.cmpxchg(52, 64, Relaxed).is_err();
    /// # assert!(failure);
    ///
    /// // Uses the old value if the cmpxchg failed, probably to retry it.
    /// match x.cmpxchg(52, 64, Relaxed) {
    ///     Ok(_) => { },
    ///     Err(old) => {
    ///         // do something with `old`.
    ///         # assert_eq!(old, 42);
    ///     }
    /// }
    ///
    /// // Uses the latest value regardless, same as atomic_cmpxchg() in C.
    /// let latest = x.cmpxchg(42, 64, Full).unwrap_or_else(|old| old);
    /// # assert_eq!(42, latest);
    /// assert_eq!(64, x.load(Relaxed));
    /// ```
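    ///
    /// A typical compare-and-swap loop, sketched here as a saturating increment
    /// (`saturating_inc` is illustrative, not an existing kernel API):
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Full, Relaxed};
    ///
    /// fn saturating_inc(v: &Atomic<i32>) -> i32 {
    ///     let mut old = v.load(Relaxed);
    ///     while old != i32::MAX {
    ///         match v.cmpxchg(old, old + 1, Full) {
    ///             Ok(_) => break,
    ///             // Another context updated `*v`; retry with the value it wrote.
    ///             Err(cur) => old = cur,
    ///         }
    ///     }
    ///     old
    /// }
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, saturating_inc(&x));
    /// assert_eq!(43, x.load(Relaxed));
    /// ```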
    ///
    /// [`Relaxed`]: ordering::Relaxed
    #[doc(alias(
        "atomic_cmpxchg",
        "atomic64_cmpxchg",
        "atomic_try_cmpxchg",
        "atomic64_try_cmpxchg",
        "compare_exchange"
    ))]
    #[inline(always)]
    pub fn cmpxchg<Ordering: ordering::Ordering>(
        &self,
        mut old: T,
        new: T,
        o: Ordering,
    ) -> Result<T, T> {
        // Note on code generation:
        //
        // try_cmpxchg() is used to implement cmpxchg(), and if the helper functions are inlined,
        // the compiler is able to figure out that the branch is not needed if the users don't
        // care about whether the operation succeeds or not. One exception is x86: due to commit
        // 44fe84459faf ("locking/atomic: Fix atomic_try_cmpxchg() semantics"),
        // atomic_try_cmpxchg() on x86 has a branch even if the caller doesn't care about the
        // success of the cmpxchg and only wants to use the old value. For example, for code like:
        //
        //     let latest = x.cmpxchg(42, 64, Full).unwrap_or_else(|old| old);
        //
        // it will still generate code:
        //
        //     movl $0x40, %ecx
        //     movl $0x34, %eax
        //     lock
        //     cmpxchgl %ecx, 0x4(%rsp)
        //     jne 1f
        // 2:
        //     ...
        // 1:  movl %eax, %ecx
        //     jmp 2b
        //
        // This might be "fixed" by introducing a try_cmpxchg_exclusive() that knows the "*old"
        // location in the C function is always safe to write.
        if self.try_cmpxchg(&mut old, new, o) {
            Ok(old)
        } else {
            Err(old)
        }
    }

    /// Atomic compare and exchange, returning whether the operation succeeded.
    ///
    /// If `*self` == `old`, atomically updates `*self` to `new`. Otherwise, `*self` is not
    /// modified, and `*old` is updated to the current value of `*self`.
    ///
    /// The "Compare" and "Ordering" parts are the same as for [`Atomic::cmpxchg()`].
    ///
    /// Returns `true` if the cmpxchg succeeds, otherwise returns `false`.
    #[inline(always)]
    fn try_cmpxchg<Ordering: ordering::Ordering>(&self, old: &mut T, new: T, _: Ordering) -> bool {
        let mut tmp = into_repr(*old);
        let new = into_repr(new);

        // INVARIANT: `self.0` is a valid `T` after `atomic_try_cmpxchg*()` because `new` is
        // transmutable to `T`.
        let ret = {
            match Ordering::TYPE {
                OrderingType::Full => T::Repr::atomic_try_cmpxchg(&self.0, &mut tmp, new),
                OrderingType::Acquire => {
                    T::Repr::atomic_try_cmpxchg_acquire(&self.0, &mut tmp, new)
                }
                OrderingType::Release => {
                    T::Repr::atomic_try_cmpxchg_release(&self.0, &mut tmp, new)
                }
                OrderingType::Relaxed => {
                    T::Repr::atomic_try_cmpxchg_relaxed(&self.0, &mut tmp, new)
                }
            }
        };

        // SAFETY: `tmp` comes from reading `*self`, which is a valid `T` per type invariants.
        *old = unsafe { from_repr(tmp) };

        ret
    }
}

impl<T: AtomicType> Atomic<T>
where
    T::Repr: AtomicArithmeticOps,
{
    /// Atomic add.
    ///
    /// Atomically updates `*self` to `(*self).wrapping_add(v)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// x.add(12, Relaxed);
    ///
    /// assert_eq!(54, x.load(Relaxed));
    /// ```
    #[inline(always)]
    pub fn add<Rhs>(&self, v: Rhs, _: ordering::Relaxed)
    where
        T: AtomicAdd<Rhs>,
    {
        let v = T::rhs_into_delta(v);

        // INVARIANT: `self.0` is a valid `T` after `atomic_add()` due to the safety requirement
        // of `AtomicAdd`.
        T::Repr::atomic_add(&self.0, v);
    }

    /// Atomic fetch and add.
    ///
    /// Atomically updates `*self` to `(*self).wrapping_add(v)`, and returns the value of `*self`
    /// before the update.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Acquire, Full, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// assert_eq!(54, { x.fetch_add(12, Acquire); x.load(Relaxed) });
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// assert_eq!(54, { x.fetch_add(12, Full); x.load(Relaxed) });
    /// ```
    #[inline(always)]
    pub fn fetch_add<Rhs, Ordering: ordering::Ordering>(&self, v: Rhs, _: Ordering) -> T
    where
        T: AtomicAdd<Rhs>,
    {
        let v = T::rhs_into_delta(v);

        // INVARIANT: `self.0` is a valid `T` after `atomic_fetch_add*()` due to the safety
        // requirement of `AtomicAdd`.
        let ret = {
            match Ordering::TYPE {
                OrderingType::Full => T::Repr::atomic_fetch_add(&self.0, v),
                OrderingType::Acquire => T::Repr::atomic_fetch_add_acquire(&self.0, v),
                OrderingType::Release => T::Repr::atomic_fetch_add_release(&self.0, v),
                OrderingType::Relaxed => T::Repr::atomic_fetch_add_relaxed(&self.0, v),
            }
        };

        // SAFETY: `ret` comes from reading `self.0`, which is a valid `T` per type invariants.
        unsafe { from_repr(ret) }
    }
}
561}