// kernel/io.rs

1// SPDX-License-Identifier: GPL-2.0
2
3//! Memory-mapped IO.
4//!
5//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)
6
7use crate::error::{code::EINVAL, Result};
8use crate::{bindings, build_assert, ffi::c_void};
9
10pub mod mem;
11pub mod poll;
12pub mod resource;
13
14pub use resource::Resource;
15
/// Raw representation of an MMIO region.
///
/// By itself, the existence of an instance of this structure does not provide any guarantees that
/// the represented MMIO region does exist or is properly mapped.
///
/// Instead, the bus specific MMIO implementation must convert this raw representation into an `Io`
/// instance providing the actual memory accessors. Only the conversion into an `Io` structure
/// provides any guarantees.
pub struct IoRaw<const SIZE: usize = 0> {
    // Base address of the (presumed) MMIO region.
    addr: usize,
    // Runtime size of the region; `IoRaw::new` guarantees `maxsize >= SIZE`.
    maxsize: usize,
}
28
29impl<const SIZE: usize> IoRaw<SIZE> {
30    /// Returns a new `IoRaw` instance on success, an error otherwise.
31    pub fn new(addr: usize, maxsize: usize) -> Result<Self> {
32        if maxsize < SIZE {
33            return Err(EINVAL);
34        }
35
36        Ok(Self { addr, maxsize })
37    }
38
39    /// Returns the base address of the MMIO region.
40    #[inline]
41    pub fn addr(&self) -> usize {
42        self.addr
43    }
44
45    /// Returns the maximum size of the MMIO region.
46    #[inline]
47    pub fn maxsize(&self) -> usize {
48        self.maxsize
49    }
50}
51
/// IO-mapped memory region.
///
/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
/// mapping, performing an additional region request etc.
///
/// # Invariant
///
/// `addr` is the start and `maxsize` the length of a valid I/O mapped memory region.
///
/// # Examples
///
/// ```no_run
/// # use kernel::{bindings, ffi::c_void, io::{Io, IoRaw}};
/// # use core::ops::Deref;
///
/// // See also [`pci::Bar`] for a real example.
/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
///
/// impl<const SIZE: usize> IoMem<SIZE> {
///     /// # Safety
///     ///
///     /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the CPUs
///     /// virtual address space.
///     unsafe fn new(paddr: usize) -> Result<Self> {
///         // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
///         // valid for `ioremap`.
///         let addr = unsafe { bindings::ioremap(paddr as bindings::phys_addr_t, SIZE) };
///         if addr.is_null() {
///             return Err(ENOMEM);
///         }
///
///         Ok(IoMem(IoRaw::new(addr as usize, SIZE)?))
///     }
/// }
///
/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
///     fn drop(&mut self) {
///         // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
///         unsafe { bindings::iounmap(self.0.addr() as *mut c_void); };
///     }
/// }
///
/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
///    type Target = Io<SIZE>;
///
///    fn deref(&self) -> &Self::Target {
///         // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
///         unsafe { Io::from_raw(&self.0) }
///    }
/// }
///
/// # fn no_run() -> Result<(), Error> {
/// // SAFETY: Invalid usage for example purposes.
/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
/// iomem.write32(0x42, 0x0);
/// assert!(iomem.try_write32(0x42, 0x0).is_ok());
/// assert!(iomem.try_write32(0x42, 0x4).is_err());
/// # Ok(())
/// # }
/// ```
#[repr(transparent)]
pub struct Io<const SIZE: usize = 0>(IoRaw<SIZE>);
115
// Generates a pair of MMIO read accessors for one data width: `$name` performs its bounds check
// at compile time (via `io_addr_assert`), `$try_name` at runtime (via `io_addr`). Both forward
// the access to the C helper `bindings::$c_fn`.
macro_rules! define_read {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident -> $type_name:ty) => {
        /// Read IO data from a given offset known at compile time.
        ///
        /// Bound checks are performed at compile time, hence if the offset is not known at compile
        /// time, the build will fail.
        $(#[$attr])*
        #[inline]
        pub fn $name(&self, offset: usize) -> $type_name {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(addr as *const c_void) }
        }

        /// Read IO data from a given offset.
        ///
        /// Bound checks are performed at runtime, it fails if the offset (plus the type size) is
        /// out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, offset: usize) -> Result<$type_name> {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            Ok(unsafe { bindings::$c_fn(addr as *const c_void) })
        }
    };
}
144
// Generates a pair of MMIO write accessors for one data width: `$name` performs its bounds check
// at compile time (via `io_addr_assert`), `$try_name` at runtime (via `io_addr`). Both forward
// the access to the C helper `bindings::$c_fn`.
macro_rules! define_write {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident <- $type_name:ty) => {
        /// Write IO data to a given offset known at compile time.
        ///
        /// Bound checks are performed at compile time, hence if the offset is not known at compile
        /// time, the build will fail.
        $(#[$attr])*
        #[inline]
        pub fn $name(&self, value: $type_name, offset: usize) {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(value, addr as *mut c_void) }
        }

        /// Write IO data to a given offset.
        ///
        /// Bound checks are performed at runtime, it fails if the offset (plus the type size) is
        /// out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, value: $type_name, offset: usize) -> Result {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(value, addr as *mut c_void) }
            Ok(())
        }
    };
}
174
impl<const SIZE: usize> Io<SIZE> {
    /// Converts an `IoRaw` into an `Io` instance, providing the accessors to the MMIO mapping.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `addr` is the start of a valid I/O mapped memory region of size
    /// `maxsize`.
    pub unsafe fn from_raw(raw: &IoRaw<SIZE>) -> &Self {
        // SAFETY: `Io` is a transparent wrapper around `IoRaw`.
        unsafe { &*core::ptr::from_ref(raw).cast() }
    }

    /// Returns the base address of this mapping.
    #[inline]
    pub fn addr(&self) -> usize {
        self.0.addr()
    }

    /// Returns the maximum size of this mapping.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.0.maxsize()
    }

    // Returns `true` if an access of `size_of::<U>()` bytes at `offset` fits within `size` and
    // `offset` is naturally aligned for `U`; `checked_add` guards against `offset + width`
    // wrapping around.
    #[inline]
    const fn offset_valid<U>(offset: usize, size: usize) -> bool {
        let type_size = core::mem::size_of::<U>();
        if let Some(end) = offset.checked_add(type_size) {
            end <= size && offset % type_size == 0
        } else {
            false
        }
    }

    // Computes the absolute address for an access of type `U` at `offset`, validated at runtime
    // against the mapping's `maxsize`. Backs the `try_*` accessors.
    #[inline]
    fn io_addr<U>(&self, offset: usize) -> Result<usize> {
        if !Self::offset_valid::<U>(offset, self.maxsize()) {
            return Err(EINVAL);
        }

        // Probably no need to check, since the safety requirements of `Self::new` guarantee that
        // this can't overflow.
        self.addr().checked_add(offset).ok_or(EINVAL)
    }

    // Computes the absolute address for an access of type `U` at `offset`, with the bounds check
    // proven at compile time against `SIZE`: `build_assert!` fails the build unless the compiler
    // can evaluate the condition to `true`. Backs the non-`try` accessors.
    #[inline]
    fn io_addr_assert<U>(&self, offset: usize) -> usize {
        build_assert!(Self::offset_valid::<U>(offset, SIZE));

        self.addr() + offset
    }

    // Reads with memory-barrier semantics (readb/readw/readl/readq).
    define_read!(read8, try_read8, readb -> u8);
    define_read!(read16, try_read16, readw -> u16);
    define_read!(read32, try_read32, readl -> u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        read64,
        try_read64,
        readq -> u64
    );

    // Relaxed reads without ordering guarantees (readb_relaxed etc.).
    define_read!(read8_relaxed, try_read8_relaxed, readb_relaxed -> u8);
    define_read!(read16_relaxed, try_read16_relaxed, readw_relaxed -> u16);
    define_read!(read32_relaxed, try_read32_relaxed, readl_relaxed -> u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        read64_relaxed,
        try_read64_relaxed,
        readq_relaxed -> u64
    );

    // Writes with memory-barrier semantics (writeb/writew/writel/writeq).
    define_write!(write8, try_write8, writeb <- u8);
    define_write!(write16, try_write16, writew <- u16);
    define_write!(write32, try_write32, writel <- u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        write64,
        try_write64,
        writeq <- u64
    );

    // Relaxed writes without ordering guarantees (writeb_relaxed etc.).
    define_write!(write8_relaxed, try_write8_relaxed, writeb_relaxed <- u8);
    define_write!(write16_relaxed, try_write16_relaxed, writew_relaxed <- u16);
    define_write!(write32_relaxed, try_write32_relaxed, writel_relaxed <- u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        write64_relaxed,
        try_write64_relaxed,
        writeq_relaxed <- u64
    );
}