kernel/sync/lock.rs
1// SPDX-License-Identifier: GPL-2.0
2
3//! Generic kernel lock and guard.
4//!
5//! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
6//! spinlocks, raw spinlocks) to be provided with minimal effort.
7
8use super::LockClassKey;
9use crate::{
10 str::{CStr, CStrExt as _},
11 types::{NotThreadSafe, Opaque, ScopeGuard},
12};
13use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin};
14use pin_init::{pin_data, pin_init, PinInit, Wrapper};
15
16pub mod mutex;
17pub mod spinlock;
18
19pub(super) mod global;
20pub use global::{GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};
21
/// The "backend" of a lock.
///
/// It is the actual implementation of the lock, without the need to repeat patterns used in all
/// locks.
///
/// # Safety
///
/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
/// is owned, that is, between calls to [`lock`] and [`unlock`].
/// - Implementers must also ensure that [`relock`] uses the same locking method as the original
/// lock operation.
///
/// [`lock`]: Backend::lock
/// [`unlock`]: Backend::unlock
/// [`relock`]: Backend::relock
pub unsafe trait Backend {
    /// The state required by the lock.
    ///
    /// This is the backing lock object itself, manipulated only through raw pointers by the
    /// backend's methods (e.g. the C lock struct for mutex/spinlock backends).
    type State;

    /// The state required to be kept between [`lock`] and [`unlock`].
    ///
    /// Captured when the lock is acquired and handed back to the backend when it is released or
    /// reacquired.
    ///
    /// [`lock`]: Backend::lock
    /// [`unlock`]: Backend::unlock
    type GuardState;

    /// Initialises the lock.
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for write for the duration of the call, while `name` and `key` must
    /// remain valid for read indefinitely.
    unsafe fn init(
        ptr: *mut Self::State,
        name: *const crate::ffi::c_char,
        key: *mut bindings::lock_class_key,
    );

    /// Acquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    #[must_use]
    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;

    /// Tries to acquire the lock.
    ///
    /// On success returns the [`GuardState`]; otherwise returns [`None`].
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    ///
    /// [`GuardState`]: Backend::GuardState
    unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState>;

    /// Releases the lock, giving up its ownership.
    ///
    /// # Safety
    ///
    /// It must only be called by the current owner of the lock.
    unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState);

    /// Reacquires the lock, making the caller its owner.
    ///
    /// The default implementation simply acquires the lock again via [`Backend::lock`]; backends
    /// that need to restore additional state on relock may override it.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `guard_state` comes from a previous call to [`Backend::lock`] (or
    /// variant) that has been unlocked with [`Backend::unlock`] and will be relocked now.
    unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) {
        // SAFETY: The safety requirements of this function guarantee that the lock has been
        // initialised and is currently unlocked, so it may be (re)acquired here.
        *guard_state = unsafe { Self::lock(ptr) };
    }

    /// Asserts that the lock is held using lockdep.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    unsafe fn assert_is_held(ptr: *mut Self::State);
}
99
/// A mutual exclusion primitive.
///
/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
/// [`Backend`] specified as the generic parameter `B`.
// `#[repr(C)]` fixes the field order so that `state` sits at offset zero; `Lock::<(), B>::from_raw`
// relies on this layout guarantee.
#[repr(C)]
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
    /// The kernel lock object.
    ///
    /// Kept in an [`Opaque`] because the backend initialises and manipulates it exclusively
    /// through raw pointers.
    #[pin]
    state: Opaque<B::State>,

    /// Some locks are known to be self-referential (e.g., mutexes), while others are architecture
    /// or config defined (e.g., spinlocks). So we conservatively require them to be pinned in case
    /// some architecture uses self-references now or in the future.
    #[pin]
    _pin: PhantomPinned,

    /// The data protected by the lock.
    ///
    /// The [`UnsafeCell`] permits mutation through a shared `&Lock` once the lock is held. As `T`
    /// may be unsized (`?Sized`), this must be the last field.
    #[pin]
    pub(crate) data: UnsafeCell<T>,
}
121
// SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can.
unsafe impl<T: ?Sized + Send, B: Backend> Send for Lock<T, B> {}

// SAFETY: `Lock` serialises the interior mutability it provides, so it is `Sync` as long as the
// data it protects is `Send`. Note that `T: Sync` is not required: access to the data always goes
// through the lock, which grants access to one thread/CPU at a time.
unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}
128
impl<T, B: Backend> Lock<T, B> {
    /// Constructs a new lock initialiser.
    ///
    /// `t` initialises the protected data in place. `name` and `key` are forwarded to
    /// [`Backend::init`], which requires them to remain valid for reads indefinitely; their
    /// `'static` lifetimes guarantee that.
    pub fn new(
        t: impl PinInit<T>,
        name: &'static CStr,
        key: Pin<&'static LockClassKey>,
    ) -> impl PinInit<Self> {
        pin_init!(Self {
            data <- UnsafeCell::pin_init(t),
            _pin: PhantomPinned,
            // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
            // static lifetimes so they live indefinitely.
            state <- Opaque::ffi_init(|slot| unsafe {
                B::init(slot, name.as_char_ptr(), key.as_ptr())
            }),
        })
    }
}
147
148impl<B: Backend> Lock<(), B> {
149 /// Constructs a [`Lock`] from a raw pointer.
150 ///
151 /// This can be useful for interacting with a lock which was initialised outside of Rust.
152 ///
153 /// # Safety
154 ///
155 /// The caller promises that `ptr` points to a valid initialised instance of [`State`] during
156 /// the whole lifetime of `'a`.
157 ///
158 /// [`State`]: Backend::State
159 #[inline]
160 pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
161 // SAFETY:
162 // - By the safety contract `ptr` must point to a valid initialised instance of `B::State`
163 // - Since the lock data type is `()` which is a ZST, `state` is the only non-ZST member of
164 // the struct
165 // - Combined with `#[repr(C)]`, this guarantees `Self` has an equivalent data layout to
166 // `B::State`.
167 unsafe { &*ptr.cast() }
168 }
169}
170
171impl<T: ?Sized, B: Backend> Lock<T, B> {
172 /// Acquires the lock and gives the caller access to the data protected by it.
173 #[inline]
174 pub fn lock(&self) -> Guard<'_, T, B> {
175 // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
176 // that `init` was called.
177 let state = unsafe { B::lock(self.state.get()) };
178 // SAFETY: The lock was just acquired.
179 unsafe { Guard::new(self, state) }
180 }
181
182 /// Tries to acquire the lock.
183 ///
184 /// Returns a guard that can be used to access the data protected by the lock if successful.
185 // `Option<T>` is not `#[must_use]` even if `T` is, thus the attribute is needed here.
186 #[must_use = "if unused, the lock will be immediately unlocked"]
187 #[inline]
188 pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
189 // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
190 // that `init` was called.
191 unsafe { B::try_lock(self.state.get()).map(|state| Guard::new(self, state)) }
192 }
193}
194
/// A lock guard.
///
/// Allows mutual exclusion primitives that implement the [`Backend`] trait to automatically unlock
/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
/// protected by the lock.
#[must_use = "the lock unlocks immediately when the guard is unused"]
pub struct Guard<'a, T: ?Sized, B: Backend> {
    /// The lock this guard was created from; it is released when the guard is dropped.
    pub(crate) lock: &'a Lock<T, B>,
    /// Backend-specific state captured at acquisition time and handed back on unlock/relock.
    pub(crate) state: B::GuardState,
    /// Makes the guard `!Send`: [`Backend::unlock`] must be called by the owner of the lock, so
    /// the guard must not migrate to another thread/CPU.
    _not_send: NotThreadSafe,
}

// SAFETY: `Guard` is sync when the data protected by the lock is also sync: a `&Guard` only gives
// out `&T` (via `Deref`), which is safe to share across threads iff `T: Sync`.
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}
209
impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
    /// Returns the lock that this guard originates from.
    ///
    /// # Examples
    ///
    /// The following example shows how to use [`Guard::lock_ref()`] to assert the corresponding
    /// lock is held.
    ///
    /// ```
    /// # use kernel::{new_spinlock, sync::lock::{Backend, Guard, Lock}};
    /// # use pin_init::stack_pin_init;
    ///
    /// fn assert_held<T, B: Backend>(guard: &Guard<'_, T, B>, lock: &Lock<T, B>) {
    ///     // Address-equal means the same lock.
    ///     assert!(core::ptr::eq(guard.lock_ref(), lock));
    /// }
    ///
    /// // Creates a new lock on the stack.
    /// stack_pin_init!{
    ///     let l = new_spinlock!(42)
    /// }
    ///
    /// let g = l.lock();
    ///
    /// // `g` originates from `l`.
    /// assert_held(&g, &l);
    /// ```
    pub fn lock_ref(&self) -> &'a Lock<T, B> {
        self.lock
    }

    /// Temporarily releases the lock, runs `cb` without it held, then reacquires the lock and
    /// returns `cb`'s result.
    pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
        // SAFETY: The existence of this guard proves the caller owns the lock, so it is safe to
        // unlock it here.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };

        // The lock is reacquired when `_relock` goes out of scope, i.e. after `cb` finishes.
        let _relock = ScopeGuard::new(||
            // SAFETY: The lock was just unlocked above and is being relocked now.
            unsafe { B::relock(self.lock.state.get(), &mut self.state) });

        cb()
    }

    /// Returns a pinned mutable reference to the protected data.
    ///
    /// The guard implements [`DerefMut`] when `T: Unpin`, so for [`Unpin`]
    /// types [`DerefMut`] should be used instead of this function.
    ///
    /// [`DerefMut`]: core::ops::DerefMut
    /// [`Unpin`]: core::marker::Unpin
    ///
    /// # Examples
    ///
    /// ```
    /// # use kernel::sync::{Mutex, MutexGuard};
    /// # use core::{pin::Pin, marker::PhantomPinned};
    /// struct Data(PhantomPinned);
    ///
    /// fn example(mutex: &Mutex<Data>) {
    ///     let mut data: MutexGuard<'_, Data> = mutex.lock();
    ///     let mut data: Pin<&mut Data> = data.as_mut();
    /// }
    /// ```
    pub fn as_mut(&mut self) -> Pin<&mut T> {
        // SAFETY: `self.lock.data` is structurally pinned (it is `#[pin]` in `Lock`), so the data
        // is never moved while the lock is pinned.
        unsafe { Pin::new_unchecked(&mut *self.lock.data.get()) }
    }
}
277
278impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
279 type Target = T;
280
281 #[inline]
282 fn deref(&self) -> &Self::Target {
283 // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
284 unsafe { &*self.lock.data.get() }
285 }
286}
287
288impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B>
289where
290 T: Unpin,
291{
292 #[inline]
293 fn deref_mut(&mut self) -> &mut Self::Target {
294 // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
295 unsafe { &mut *self.lock.data.get() }
296 }
297}
298
299impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
300 #[inline]
301 fn drop(&mut self) {
302 // SAFETY: The caller owns the lock, so it is safe to unlock it.
303 unsafe { B::unlock(self.lock.state.get(), &self.state) };
304 }
305}
306
307impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
308 /// Constructs a new immutable lock guard.
309 ///
310 /// # Safety
311 ///
312 /// The caller must ensure that it owns the lock.
313 #[inline]
314 pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
315 // SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
316 unsafe { B::assert_is_held(lock.state.get()) };
317
318 Self {
319 lock,
320 state,
321 _not_send: NotThreadSafe,
322 }
323 }
324}