kernel/block/mq/request.rs
// SPDX-License-Identifier: GPL-2.0

//! This module provides a wrapper for the C `struct request` type.
//!
//! C header: [`include/linux/blk-mq.h`](srctree/include/linux/blk-mq.h)

use crate::{
    bindings,
    block::mq::Operations,
    error::Result,
    types::{ARef, AlwaysRefCounted, Opaque},
};
use core::{
    marker::PhantomData,
    ptr::NonNull,
    sync::atomic::{AtomicU64, Ordering},
};

/// A wrapper around a blk-mq [`struct request`]. This represents an IO request.
///
/// # Implementation details
///
/// There are four states for a request that the Rust bindings care about:
///
/// 1. Request is owned by block layer (refcount 0).
/// 2. Request is owned by driver but with zero [`ARef`]s in existence
///    (refcount 1).
/// 3. Request is owned by driver with exactly one [`ARef`] in existence
///    (refcount 2).
/// 4. Request is owned by driver with more than one [`ARef`] in existence
///    (refcount > 2).
///
/// We need to track 1 and 2 to ensure we fail tag to request conversions for
/// requests that are not owned by the driver.
///
/// We need to track 3 and 4 to ensure that it is safe to end the request and
/// hand back ownership to the block layer.
///
/// The states are tracked through the private `refcount` field of
/// `RequestDataWrapper`. This structure lives in the private data area of the
/// C [`struct request`].
///
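/// For illustration only, a request that is handed to a driver, has its
/// [`ARef`] cloned once, and is then completed with [`Request::end_ok`] might
/// move through the states like this (the numbers are `refcount` values; the
/// exact transitions depend on the driver):
///
/// ```text
/// 0   owned by the block layer                      (state 1)
/// 2   owned by the driver, one ARef in existence    (state 3)
/// 3   the ARef is cloned                            (state 4)
/// 2   the clone is dropped                          (state 3)
/// 0   end_ok succeeds, ownership is handed back
///     to the block layer                            (state 1)
/// ```
///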
/// # Invariants
///
/// * `self.0` is a valid [`struct request`] created by the C portion of the
///   kernel.
/// * The private data area associated with this request must be an initialized
///   and valid `RequestDataWrapper`.
/// * `self` is reference counted by atomic modification of
///   `self.wrapper_ref().refcount()`.
///
/// [`struct request`]: srctree/include/linux/blk-mq.h
///
#[repr(transparent)]
pub struct Request<T: Operations>(Opaque<bindings::request>, PhantomData<T>);

impl<T: Operations> Request<T> {
    /// Create an [`ARef<Request>`] from a [`struct request`] pointer.
    ///
    /// # Safety
    ///
    /// * The caller must own a refcount on `ptr` that is transferred to the
    ///   returned [`ARef`].
    /// * The type invariants for [`Request`] must hold for the pointee of `ptr`.
    ///
    /// [`struct request`]: srctree/include/linux/blk-mq.h
    pub(crate) unsafe fn aref_from_raw(ptr: *mut bindings::request) -> ARef<Self> {
        // INVARIANT: By the safety requirements of this function, invariants are upheld.
        // SAFETY: By the safety requirement of this function, we own a
        // reference count that we can pass to `ARef`.
        unsafe { ARef::from_raw(NonNull::new_unchecked(ptr.cast())) }
    }

    /// Notify the block layer that a request is going to be processed now.
    ///
    /// The block layer uses this hook to do proper initializations such as
    /// starting the timeout timer. It is a requirement that block device
    /// drivers call this function when starting to process a request.
    ///
    /// # Safety
    ///
    /// The caller must have exclusive ownership of `self`, that is
    /// `self.wrapper_ref().refcount() == 2`.
    pub(crate) unsafe fn start_unchecked(this: &ARef<Self>) {
        // SAFETY: By type invariant, `self.0` is a valid `struct request` and
        // we have exclusive access.
        unsafe { bindings::blk_mq_start_request(this.0.get()) };
    }

    /// Try to take exclusive ownership of `this` by dropping the refcount to 0.
    /// This fails if `this` is not the only [`ARef`] pointing to the underlying
    /// [`Request`].
    ///
    /// If the operation is successful, [`Ok`] is returned with a pointer to the
    /// C [`struct request`]. If the operation fails, `this` is returned in the
    /// [`Err`] variant.
    ///
    /// [`struct request`]: srctree/include/linux/blk-mq.h
    fn try_set_end(this: ARef<Self>) -> Result<*mut bindings::request, ARef<Self>> {
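        // A refcount of exactly 2 means that the driver owns the request and
        // `this` is the only `ARef` pointing to it (see the states in the
        // struct documentation). Only in that state may ownership be handed
        // back, so the 2 -> 0 transition is done as a single compare-exchange.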
        // We can race with `TagSet::tag_to_rq`
        if let Err(_old) = this.wrapper_ref().refcount().compare_exchange(
            2,
            0,
            Ordering::Relaxed,
            Ordering::Relaxed,
        ) {
            return Err(this);
        }

        let request_ptr = this.0.get();
        core::mem::forget(this);

        Ok(request_ptr)
    }

    /// Notify the block layer that the request has been completed without errors.
    ///
    /// This function will return [`Err`] if `this` is not the only [`ARef`]
    /// referencing the request.
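    ///
    /// A minimal usage sketch (illustration only, not a compiled example); the
    /// driver type `MyDriver` and the origin of the [`ARef`] are assumed:
    ///
    /// ```rust,ignore
    /// fn complete(rq: ARef<Request<MyDriver>>) {
    ///     // `end_ok` consumes the `ARef` on success. On failure it hands the
    ///     // `ARef` back so completion can be retried once the other
    ///     // references are gone.
    ///     if let Err(_rq) = Request::end_ok(rq) {
    ///         pr_warn!("request still has other references\n");
    ///     }
    /// }
    /// ```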
    pub fn end_ok(this: ARef<Self>) -> Result<(), ARef<Self>> {
        let request_ptr = Self::try_set_end(this)?;

        // SAFETY: By type invariant, `this.0` was a valid `struct request`. The
        // success of the call to `try_set_end` guarantees that there are no
        // `ARef`s pointing to this request. Therefore it is safe to hand it
        // back to the block layer.
        unsafe {
            bindings::blk_mq_end_request(
                request_ptr,
                bindings::BLK_STS_OK as bindings::blk_status_t,
            )
        };

        Ok(())
    }

    /// Return a pointer to the [`RequestDataWrapper`] stored in the private area
    /// of the request structure.
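    ///
    /// The wrapper lives in the request's private data area (the "pdu"), which
    /// the block layer allocates alongside each `struct request`; its size is
    /// given by the tag set's `cmd_size`.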
    ///
    /// # Safety
    ///
    /// - `this` must point to a valid allocation that is at least the size of
    ///   [`Self`] plus the size of [`RequestDataWrapper`].
    pub(crate) unsafe fn wrapper_ptr(this: *mut Self) -> NonNull<RequestDataWrapper> {
        let request_ptr = this.cast::<bindings::request>();
        // SAFETY: By safety requirements for this function, `this` is a
        // valid allocation.
        let wrapper_ptr =
            unsafe { bindings::blk_mq_rq_to_pdu(request_ptr).cast::<RequestDataWrapper>() };
        // SAFETY: By C API contract, wrapper_ptr points to a valid allocation
        // and is not null.
        unsafe { NonNull::new_unchecked(wrapper_ptr) }
    }

    /// Return a reference to the [`RequestDataWrapper`] stored in the private
    /// area of the request structure.
    pub(crate) fn wrapper_ref(&self) -> &RequestDataWrapper {
        // SAFETY: By type invariant, `self.0` is a valid allocation. Further,
        // the private data associated with this request is initialized and
        // valid. The existence of `&self` guarantees that the private data is
        // valid as a shared reference.
        unsafe { Self::wrapper_ptr(core::ptr::from_ref(self).cast_mut()).as_ref() }
    }
}

/// A wrapper around data stored in the private area of the C [`struct request`].
///
/// [`struct request`]: srctree/include/linux/blk-mq.h
pub(crate) struct RequestDataWrapper {
    /// The Rust request refcount has the following states:
    ///
    /// - 0: The request is owned by C block layer.
    /// - 1: The request is owned by Rust abstractions but there are no [`ARef`] references to it.
    /// - 2+: There are [`ARef`] references to the request.
    refcount: AtomicU64,
}

impl RequestDataWrapper {
    /// Return a reference to the refcount of the request that is embedding
    /// `self`.
    pub(crate) fn refcount(&self) -> &AtomicU64 {
        &self.refcount
    }

    /// Return a pointer to the refcount of the request that is embedding the
    /// pointee of `this`.
    ///
    /// # Safety
    ///
    /// - `this` must point to a live allocation of at least the size of `Self`.
    pub(crate) unsafe fn refcount_ptr(this: *mut Self) -> *mut AtomicU64 {
        // SAFETY: Because of the safety requirements of this function, the
        // field projection is safe.
        unsafe { &raw mut (*this).refcount }
    }
}

// SAFETY: Exclusive access is thread-safe for `Request`. `Request` has no `&mut
// self` methods and `&self` methods that mutate `self` are internally
// synchronized.
unsafe impl<T: Operations> Send for Request<T> {}

// SAFETY: Shared access is thread-safe for `Request`. `&self` methods that
// mutate `self` are internally synchronized.
unsafe impl<T: Operations> Sync for Request<T> {}

/// Store the result of `op(target.load())` in `target`, returning the new
/// value of `target`.
fn atomic_relaxed_op_return(target: &AtomicU64, op: impl Fn(u64) -> u64) -> u64 {
    let old = target.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| Some(op(x)));

    // SAFETY: Because the operation passed to `fetch_update` above always
    // returns `Some`, `old` will always be `Ok`.
    let old = unsafe { old.unwrap_unchecked() };

    op(old)
}

/// Store the result of `op(target.load())` in `target` if `target.load() !=
/// pred`, returning [`true`] if the target was updated.
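///
/// [`AlwaysRefCounted::inc_ref`] below relies on this to refuse incrementing a
/// refcount that has already reached zero. For illustration only (not a
/// compiled example):
///
/// ```rust,ignore
/// // Increment the refcount unless it is currently zero.
/// let updated = atomic_relaxed_op_unless(refcount, |x| x + 1, 0);
/// ```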
fn atomic_relaxed_op_unless(target: &AtomicU64, op: impl Fn(u64) -> u64, pred: u64) -> bool {
    target
        .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| {
            if x == pred {
                None
            } else {
                Some(op(x))
            }
        })
        .is_ok()
}

// SAFETY: All instances of `Request<T>` are reference counted. This
// implementation of `AlwaysRefCounted` ensures that increments to the ref count
// keep the object alive in memory at least until a matching reference count
// decrement is executed.
unsafe impl<T: Operations> AlwaysRefCounted for Request<T> {
    fn inc_ref(&self) {
        let refcount = &self.wrapper_ref().refcount();

        #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
        let updated = atomic_relaxed_op_unless(refcount, |x| x + 1, 0);

        #[cfg(CONFIG_DEBUG_MISC)]
        if !updated {
            panic!("Request refcount zero on clone")
        }
    }

    unsafe fn dec_ref(obj: core::ptr::NonNull<Self>) {
        // SAFETY: The type invariants of `ARef` guarantee that `obj` is valid
        // for read.
        let wrapper_ptr = unsafe { Self::wrapper_ptr(obj.as_ptr()).as_ptr() };
        // SAFETY: The type invariant of `Request` guarantees that the private
        // data area is initialized and valid.
        let refcount = unsafe { &*RequestDataWrapper::refcount_ptr(wrapper_ptr) };

        #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
        let new_refcount = atomic_relaxed_op_return(refcount, |x| x - 1);

        #[cfg(CONFIG_DEBUG_MISC)]
        if new_refcount == 0 {
            panic!("Request reached refcount zero in Rust abstractions");
        }
    }
}