kernel/bitmap.rs
// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

//! Rust API for bitmap.
//!
//! C headers: [`include/linux/bitmap.h`](srctree/include/linux/bitmap.h).

use crate::alloc::{AllocError, Flags};
use crate::bindings;
#[cfg(not(CONFIG_RUST_BITMAP_HARDENED))]
use crate::pr_err;
use core::ptr::NonNull;

const BITS_PER_LONG: usize = bindings::BITS_PER_LONG as usize;

/// Represents a C bitmap. Wraps underlying C bitmap API.
///
/// # Invariants
///
/// Must reference a `[c_ulong]` long enough to fit `data.len()` bits.
#[cfg_attr(CONFIG_64BIT, repr(align(8)))]
#[cfg_attr(not(CONFIG_64BIT), repr(align(4)))]
pub struct Bitmap {
    data: [()],
}

impl Bitmap {
    /// Borrows a C bitmap.
    ///
    /// # Safety
    ///
    /// * `ptr` holds a non-null address of an initialized array of `unsigned long`
    ///   that is large enough to hold `nbits` bits.
    /// * the array must not be freed for the lifetime of this [`Bitmap`]
    /// * concurrent access only happens through atomic operations
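    ///
    /// # Examples
    ///
    /// A minimal sketch that borrows a local `usize` as a read-only bitmap:
    ///
    /// ```
    /// use kernel::bitmap::Bitmap;
    ///
    /// let word: usize = 0b1001;
    /// // SAFETY: `word` is an initialized `usize` that outlives the borrow and
    /// // is not accessed concurrently.
    /// let b = unsafe { Bitmap::from_raw(core::ptr::addr_of!(word), 4) };
    /// assert_eq!(4, b.len());
    /// assert_eq!(Some(0), b.next_bit(0));
    /// assert_eq!(Some(3), b.last_bit());
    /// ```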
    pub unsafe fn from_raw<'a>(ptr: *const usize, nbits: usize) -> &'a Bitmap {
        let data: *const [()] = core::ptr::slice_from_raw_parts(ptr.cast(), nbits);
        // INVARIANT: `data` references an initialized array that can hold `nbits` bits.
        // SAFETY:
        // The caller guarantees that `data` (derived from `ptr` and `nbits`)
        // points to a valid, initialized, and appropriately sized memory region
        // that will not be freed for the lifetime 'a.
        // We are casting `*const [()]` to `*const Bitmap`. `Bitmap` is a
        // dynamically sized type whose only field is `data: [()]`, so the two
        // pointers share the same fat-pointer layout and metadata. The metadata
        // written by `slice_from_raw_parts` is the length `nbits` (the number
        // of bits, not the number of `unsigned long` elements), and since `()`
        // is zero-sized the pointee itself occupies no memory beyond the
        // caller's array. The alignment required by `Bitmap` matches that of
        // `unsigned long`, which the caller's array provides. Therefore,
        // dereferencing this pointer as `&Bitmap` is sound given the caller's
        // guarantees.
        unsafe { &*(data as *const Bitmap) }
    }

    /// Borrows a C bitmap exclusively.
    ///
    /// # Safety
    ///
    /// * `ptr` holds a non-null address of an initialized array of `unsigned long`
    ///   that is large enough to hold `nbits` bits.
    /// * the array must not be freed for the lifetime of this [`Bitmap`]
    /// * no concurrent access may happen.
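    ///
    /// # Examples
    ///
    /// A minimal sketch that borrows a local `usize` for exclusive modification:
    ///
    /// ```
    /// use kernel::bitmap::Bitmap;
    ///
    /// let mut word: usize = 0;
    /// // SAFETY: `word` is an initialized `usize` that outlives the borrow and
    /// // nothing else accesses it while the `Bitmap` exists.
    /// let b = unsafe { Bitmap::from_raw_mut(core::ptr::addr_of_mut!(word), 4) };
    /// b.set_bit(2);
    /// assert_eq!(Some(2), b.next_bit(0));
    /// ```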
    pub unsafe fn from_raw_mut<'a>(ptr: *mut usize, nbits: usize) -> &'a mut Bitmap {
        let data: *mut [()] = core::ptr::slice_from_raw_parts_mut(ptr.cast(), nbits);
        // INVARIANT: `data` references an initialized array that can hold `nbits` bits.
        // SAFETY:
        // The caller guarantees that `data` (derived from `ptr` and `nbits`)
        // points to a valid, initialized, and appropriately sized memory region
        // that will not be freed for the lifetime 'a.
        // Furthermore, the caller guarantees no concurrent access will happen,
        // which upholds the exclusivity requirement for a mutable reference.
        // As in `from_raw`, casting `*mut [()]` to `*mut Bitmap` is sound
        // because `Bitmap` is a dynamically sized type whose only field is
        // `data: [()]`, so both pointers share the same fat-pointer layout
        // and metadata.
        unsafe { &mut *(data as *mut Bitmap) }
    }

    /// Returns a raw pointer to the backing [`Bitmap`].
    pub fn as_ptr(&self) -> *const usize {
        core::ptr::from_ref::<Bitmap>(self).cast::<usize>()
    }

    /// Returns a mutable raw pointer to the backing [`Bitmap`].
    pub fn as_mut_ptr(&mut self) -> *mut usize {
        core::ptr::from_mut::<Bitmap>(self).cast::<usize>()
    }

    /// Returns length of this [`Bitmap`].
    #[expect(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.data.len()
    }
}

/// Holds either a pointer to array of `unsigned long` or a small bitmap.
#[repr(C)]
union BitmapRepr {
    bitmap: usize,
    ptr: NonNull<usize>,
}

macro_rules! bitmap_assert {
    ($cond:expr, $($arg:tt)+) => {
        #[cfg(CONFIG_RUST_BITMAP_HARDENED)]
        assert!($cond, $($arg)*);
    }
}

macro_rules! bitmap_assert_return {
    ($cond:expr, $($arg:tt)+) => {
        #[cfg(CONFIG_RUST_BITMAP_HARDENED)]
        assert!($cond, $($arg)*);

        #[cfg(not(CONFIG_RUST_BITMAP_HARDENED))]
        if !($cond) {
            pr_err!($($arg)*);
            return
        }
    }
}

/// Represents an owned bitmap.
///
/// Wraps underlying C bitmap API. See [`Bitmap`] for available
/// methods.
///
/// # Examples
///
/// Basic usage
///
/// ```
/// use kernel::alloc::flags::GFP_KERNEL;
/// use kernel::bitmap::BitmapVec;
///
/// let mut b = BitmapVec::new(16, GFP_KERNEL)?;
///
/// assert_eq!(16, b.len());
/// for i in 0..16 {
///     if i % 4 == 0 {
///         b.set_bit(i);
///     }
/// }
/// assert_eq!(Some(0), b.next_bit(0));
/// assert_eq!(Some(1), b.next_zero_bit(0));
/// assert_eq!(Some(4), b.next_bit(1));
/// assert_eq!(Some(5), b.next_zero_bit(4));
/// assert_eq!(Some(12), b.last_bit());
/// # Ok::<(), Error>(())
/// ```
///
/// # Invariants
///
/// * `nbits` is `<= i32::MAX` and never changes.
/// * if `nbits <= bindings::BITS_PER_LONG`, then `repr` is a `usize`.
/// * otherwise, `repr` holds a non-null pointer to an initialized
///   array of `unsigned long` that is large enough to hold `nbits` bits.
pub struct BitmapVec {
    /// Representation of bitmap.
    repr: BitmapRepr,
    /// Length of this bitmap. Must be `<= i32::MAX`.
    nbits: usize,
}

impl core::ops::Deref for BitmapVec {
    type Target = Bitmap;

    fn deref(&self) -> &Bitmap {
        let ptr = if self.nbits <= BITS_PER_LONG {
            // SAFETY: Bitmap is represented inline.
            unsafe { core::ptr::addr_of!(self.repr.bitmap) }
        } else {
            // SAFETY: Bitmap is represented as array of `unsigned long`.
            unsafe { self.repr.ptr.as_ptr() }
        };

        // SAFETY: We got the right pointer and invariants of [`Bitmap`] hold.
        // An inline bitmap is treated like an array with single element.
        unsafe { Bitmap::from_raw(ptr, self.nbits) }
    }
}

impl core::ops::DerefMut for BitmapVec {
    fn deref_mut(&mut self) -> &mut Bitmap {
        let ptr = if self.nbits <= BITS_PER_LONG {
            // SAFETY: Bitmap is represented inline.
            unsafe { core::ptr::addr_of_mut!(self.repr.bitmap) }
        } else {
            // SAFETY: Bitmap is represented as array of `unsigned long`.
            unsafe { self.repr.ptr.as_ptr() }
        };

        // SAFETY: We got the right pointer and invariants of [`BitmapVec`] hold.
        // An inline bitmap is treated like an array with single element.
        unsafe { Bitmap::from_raw_mut(ptr, self.nbits) }
    }
}

/// Enable ownership transfer to other threads.
///
/// SAFETY: We own the underlying bitmap representation.
unsafe impl Send for BitmapVec {}

/// Enable unsynchronized concurrent access to [`BitmapVec`] through shared references.
///
/// SAFETY: `deref()` will return a reference to a [`Bitmap`]. Its methods
/// that take immutable references are either atomic or read-only.
unsafe impl Sync for BitmapVec {}

impl Drop for BitmapVec {
    fn drop(&mut self) {
        if self.nbits <= BITS_PER_LONG {
            return;
        }
        // SAFETY: `self.repr.ptr` was returned by the C `bitmap_zalloc`.
        //
        // INVARIANT: there is no other use of `self.repr.ptr` after this
        // call and the value is being dropped so the broken invariant is
        // not observable on function exit.
        unsafe { bindings::bitmap_free(self.repr.ptr.as_ptr()) };
    }
}

impl BitmapVec {
    /// Constructs a new [`BitmapVec`].
    ///
    /// Fails with [`AllocError`] when the [`BitmapVec`] could not be allocated. This
    /// includes the case when `nbits` is greater than `i32::MAX`.
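    ///
    /// # Examples
    ///
    /// A minimal sketch, including the failure case for an oversized request:
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let b = BitmapVec::new(128, GFP_KERNEL)?;
    /// assert_eq!(128, b.len());
    ///
    /// // Requesting more than `i32::MAX` bits fails with `AllocError`.
    /// assert!(BitmapVec::new(1 << 31, GFP_KERNEL).is_err());
    /// # Ok::<(), AllocError>(())
    /// ```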
    #[inline]
    pub fn new(nbits: usize, flags: Flags) -> Result<Self, AllocError> {
        if nbits <= BITS_PER_LONG {
            return Ok(BitmapVec {
                repr: BitmapRepr { bitmap: 0 },
                nbits,
            });
        }
        if nbits > i32::MAX.try_into().unwrap() {
            return Err(AllocError);
        }
        let nbits_u32 = u32::try_from(nbits).unwrap();
        // SAFETY: `BITS_PER_LONG < nbits` and `nbits <= i32::MAX`.
        let ptr = unsafe { bindings::bitmap_zalloc(nbits_u32, flags.as_raw()) };
        let ptr = NonNull::new(ptr).ok_or(AllocError)?;
        // INVARIANT: `ptr` returned by C `bitmap_zalloc` and `nbits` checked.
        Ok(BitmapVec {
            repr: BitmapRepr { ptr },
            nbits,
        })
    }

    /// Returns length of this [`BitmapVec`].
    #[allow(clippy::len_without_is_empty)]
    #[inline]
    pub fn len(&self) -> usize {
        self.nbits
    }

    /// Fills this `Bitmap` with random bits.
    #[cfg(CONFIG_FIND_BIT_BENCHMARK_RUST)]
    pub fn fill_random(&mut self) {
        // SAFETY: `self.as_mut_ptr` points to either an array of the
        // appropriate length or one usize.
        unsafe {
            bindings::get_random_bytes(
                self.as_mut_ptr().cast::<ffi::c_void>(),
                usize::div_ceil(self.nbits, bindings::BITS_PER_LONG as usize)
                    * bindings::BITS_PER_LONG as usize
                    / 8,
            );
        }
    }
}

impl Bitmap {
    /// Set bit with index `index`.
    ///
    /// ATTENTION: `set_bit` is non-atomic, which differs from the naming
    /// convention in C code. The corresponding C function is `__set_bit`.
    ///
    /// If CONFIG_RUST_BITMAP_HARDENED is not enabled and `index` is greater than
    /// or equal to `self.len()`, does nothing.
    ///
    /// # Panics
    ///
    /// Panics if CONFIG_RUST_BITMAP_HARDENED is enabled and `index` is greater than
    /// or equal to `self.len()`.
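    ///
    /// # Examples
    ///
    /// A minimal sketch of a non-atomic update on an owned [`BitmapVec`]:
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let mut b = BitmapVec::new(8, GFP_KERNEL)?;
    ///
    /// b.set_bit(3);
    /// assert_eq!(Some(3), b.next_bit(0));
    /// # Ok::<(), AllocError>(())
    /// ```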
    #[inline]
    pub fn set_bit(&mut self, index: usize) {
        bitmap_assert_return!(
            index < self.len(),
            "Bit `index` must be < {}, was {}",
            self.len(),
            index
        );
        // SAFETY: Bit `index` is within bounds.
        unsafe { bindings::__set_bit(index, self.as_mut_ptr()) };
    }

    /// Set bit with index `index`, atomically.
    ///
    /// This is a relaxed atomic operation (no implied memory barriers).
    ///
    /// ATTENTION: The naming convention differs from C, where the corresponding
    /// function is called `set_bit`.
    ///
    /// If CONFIG_RUST_BITMAP_HARDENED is not enabled and `index` is greater than
    /// or equal to `self.len()`, does nothing.
    ///
    /// # Panics
    ///
    /// Panics if CONFIG_RUST_BITMAP_HARDENED is enabled and `index` is greater than
    /// or equal to `self.len()`.
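    ///
    /// # Examples
    ///
    /// A minimal single-threaded sketch; the atomic variant only needs a shared reference:
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let b = BitmapVec::new(16, GFP_KERNEL)?;
    ///
    /// b.set_bit_atomic(9);
    /// assert_eq!(Some(9), b.next_bit(0));
    /// # Ok::<(), AllocError>(())
    /// ```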
    #[inline]
    pub fn set_bit_atomic(&self, index: usize) {
        bitmap_assert_return!(
            index < self.len(),
            "Bit `index` must be < {}, was {}",
            self.len(),
            index
        );
        // SAFETY: `index` is within bounds and the caller has ensured that
        // there is no mix of non-atomic and atomic operations.
        unsafe { bindings::set_bit(index, self.as_ptr().cast_mut()) };
    }

    /// Clear bit with index `index`.
    ///
    /// ATTENTION: `clear_bit` is non-atomic, which differs from the naming
    /// convention in C code. The corresponding C function is `__clear_bit`.
    ///
    /// If CONFIG_RUST_BITMAP_HARDENED is not enabled and `index` is greater than
    /// or equal to `self.len()`, does nothing.
    ///
    /// # Panics
    ///
    /// Panics if CONFIG_RUST_BITMAP_HARDENED is enabled and `index` is greater than
    /// or equal to `self.len()`.
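    ///
    /// # Examples
    ///
    /// A minimal sketch that sets a bit and clears it again:
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let mut b = BitmapVec::new(8, GFP_KERNEL)?;
    ///
    /// b.set_bit(5);
    /// b.clear_bit(5);
    /// assert_eq!(None, b.next_bit(0));
    /// # Ok::<(), AllocError>(())
    /// ```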
    #[inline]
    pub fn clear_bit(&mut self, index: usize) {
        bitmap_assert_return!(
            index < self.len(),
            "Bit `index` must be < {}, was {}",
            self.len(),
            index
        );
        // SAFETY: `index` is within bounds.
        unsafe { bindings::__clear_bit(index, self.as_mut_ptr()) };
    }

    /// Clear bit with index `index`, atomically.
    ///
    /// This is a relaxed atomic operation (no implied memory barriers).
    ///
    /// ATTENTION: The naming convention differs from C, where the corresponding
    /// function is called `clear_bit`.
    ///
    /// If CONFIG_RUST_BITMAP_HARDENED is not enabled and `index` is greater than
    /// or equal to `self.len()`, does nothing.
    ///
    /// # Panics
    ///
    /// Panics if CONFIG_RUST_BITMAP_HARDENED is enabled and `index` is greater than
    /// or equal to `self.len()`.
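    ///
    /// # Examples
    ///
    /// A minimal single-threaded sketch using only shared references:
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let b = BitmapVec::new(16, GFP_KERNEL)?;
    ///
    /// b.set_bit_atomic(9);
    /// b.clear_bit_atomic(9);
    /// assert_eq!(None, b.next_bit(0));
    /// # Ok::<(), AllocError>(())
    /// ```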
    #[inline]
    pub fn clear_bit_atomic(&self, index: usize) {
        bitmap_assert_return!(
            index < self.len(),
            "Bit `index` must be < {}, was {}",
            self.len(),
            index
        );
        // SAFETY: `index` is within bounds and the caller has ensured that
        // there is no mix of non-atomic and atomic operations.
        unsafe { bindings::clear_bit(index, self.as_ptr().cast_mut()) };
    }

    /// Copy `src` into this [`Bitmap`] and set any remaining bits to zero.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let mut long_bitmap = BitmapVec::new(256, GFP_KERNEL)?;
    ///
    /// assert_eq!(None, long_bitmap.last_bit());
    ///
    /// let mut short_bitmap = BitmapVec::new(16, GFP_KERNEL)?;
    ///
    /// short_bitmap.set_bit(7);
    /// long_bitmap.copy_and_extend(&short_bitmap);
    /// assert_eq!(Some(7), long_bitmap.last_bit());
    ///
    /// # Ok::<(), AllocError>(())
    /// ```
    #[inline]
    pub fn copy_and_extend(&mut self, src: &Bitmap) {
        let len = core::cmp::min(src.len(), self.len());
        // SAFETY: access to `self` and `src` is within bounds.
        unsafe {
            bindings::bitmap_copy_and_extend(
                self.as_mut_ptr(),
                src.as_ptr(),
                len as u32,
                self.len() as u32,
            )
        };
    }

    /// Finds last set bit.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let bitmap = BitmapVec::new(64, GFP_KERNEL)?;
    ///
    /// match bitmap.last_bit() {
    ///     Some(idx) => {
    ///         pr_info!("The last bit has index {idx}.\n");
    ///     }
    ///     None => {
    ///         pr_info!("All bits in this bitmap are 0.\n");
    ///     }
    /// }
    /// # Ok::<(), AllocError>(())
    /// ```
    #[inline]
    pub fn last_bit(&self) -> Option<usize> {
        // SAFETY: `_find_last_bit` access is within bounds due to invariant.
        let index = unsafe { bindings::_find_last_bit(self.as_ptr(), self.len()) };
        if index >= self.len() {
            None
        } else {
            Some(index)
        }
    }

    /// Finds next set bit, starting from `start`.
    ///
    /// Returns `None` if `start` is greater than or equal to `self.len()`.
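    ///
    /// # Examples
    ///
    /// A minimal sketch of scanning for set bits:
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let mut b = BitmapVec::new(64, GFP_KERNEL)?;
    ///
    /// b.set_bit(10);
    /// b.set_bit(30);
    /// assert_eq!(Some(10), b.next_bit(0));
    /// assert_eq!(Some(30), b.next_bit(11));
    /// assert_eq!(None, b.next_bit(31));
    /// # Ok::<(), AllocError>(())
    /// ```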
    #[inline]
    pub fn next_bit(&self, start: usize) -> Option<usize> {
        bitmap_assert!(
            start < self.len(),
            "`start` must be < {}, was {}",
            self.len(),
            start
        );
        // SAFETY: `_find_next_bit` tolerates out-of-bounds arguments and returns a
        // value larger than or equal to `self.len()` in that case.
        let index = unsafe { bindings::_find_next_bit(self.as_ptr(), self.len(), start) };
        if index >= self.len() {
            None
        } else {
            Some(index)
        }
    }

    /// Finds next zero bit, starting from `start`.
    ///
    /// Returns `None` if `start` is greater than or equal to `self.len()`.
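    ///
    /// # Examples
    ///
    /// A minimal sketch of finding the first free slot:
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let mut b = BitmapVec::new(8, GFP_KERNEL)?;
    ///
    /// b.set_bit(0);
    /// b.set_bit(1);
    /// assert_eq!(Some(2), b.next_zero_bit(0));
    /// # Ok::<(), AllocError>(())
    /// ```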
    #[inline]
    pub fn next_zero_bit(&self, start: usize) -> Option<usize> {
        bitmap_assert!(
            start < self.len(),
            "`start` must be < {}, was {}",
            self.len(),
            start
        );
        // SAFETY: `_find_next_zero_bit` tolerates out-of-bounds arguments and returns a
        // value larger than or equal to `self.len()` in that case.
        let index = unsafe { bindings::_find_next_zero_bit(self.as_ptr(), self.len(), start) };
        if index >= self.len() {
            None
        } else {
            Some(index)
        }
    }
}

use macros::kunit_tests;

#[kunit_tests(rust_kernel_bitmap)]
mod tests {
    use super::*;
    use kernel::alloc::flags::GFP_KERNEL;

    #[test]
    fn bitmap_borrow() {
        let fake_bitmap: [usize; 2] = [0, 0];
        // SAFETY: `fake_bitmap` is an array of expected length.
        let b = unsafe { Bitmap::from_raw(fake_bitmap.as_ptr(), 2 * BITS_PER_LONG) };
        assert_eq!(2 * BITS_PER_LONG, b.len());
        assert_eq!(None, b.next_bit(0));
    }

    #[test]
    fn bitmap_copy() {
        let fake_bitmap: usize = 0xFF;
        // SAFETY: `fake_bitmap` can be used as one-element array of expected length.
        let b = unsafe { Bitmap::from_raw(core::ptr::addr_of!(fake_bitmap), 8) };
        assert_eq!(8, b.len());
        assert_eq!(None, b.next_zero_bit(0));
    }

    #[test]
    fn bitmap_vec_new() -> Result<(), AllocError> {
        let b = BitmapVec::new(0, GFP_KERNEL)?;
        assert_eq!(0, b.len());

        let b = BitmapVec::new(3, GFP_KERNEL)?;
        assert_eq!(3, b.len());

        let b = BitmapVec::new(1024, GFP_KERNEL)?;
        assert_eq!(1024, b.len());

        // Requesting too large values results in [`AllocError`].
        let res = BitmapVec::new(1 << 31, GFP_KERNEL);
        assert!(res.is_err());
        Ok(())
    }

    #[test]
    fn bitmap_set_clear_find() -> Result<(), AllocError> {
        let mut b = BitmapVec::new(128, GFP_KERNEL)?;

        // Zero-initialized
        assert_eq!(None, b.next_bit(0));
        assert_eq!(Some(0), b.next_zero_bit(0));
        assert_eq!(None, b.last_bit());

        b.set_bit(17);

        assert_eq!(Some(17), b.next_bit(0));
        assert_eq!(Some(17), b.next_bit(17));
        assert_eq!(None, b.next_bit(18));
        assert_eq!(Some(17), b.last_bit());

        b.set_bit(107);

        assert_eq!(Some(17), b.next_bit(0));
        assert_eq!(Some(17), b.next_bit(17));
        assert_eq!(Some(107), b.next_bit(18));
        assert_eq!(Some(107), b.last_bit());

        b.clear_bit(17);

        assert_eq!(Some(107), b.next_bit(0));
        assert_eq!(Some(107), b.last_bit());
        Ok(())
    }
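
    // A small additional check for the atomic helpers, mirroring the
    // non-atomic test above; single-threaded use only, no concurrency here.
    #[test]
    fn bitmap_atomic_set_clear() -> Result<(), AllocError> {
        let b = BitmapVec::new(64, GFP_KERNEL)?;

        assert_eq!(None, b.next_bit(0));

        b.set_bit_atomic(12);
        assert_eq!(Some(12), b.next_bit(0));
        assert_eq!(Some(12), b.last_bit());

        b.clear_bit_atomic(12);
        assert_eq!(None, b.next_bit(0));
        Ok(())
    }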

    #[test]
    fn owned_bitmap_out_of_bounds() -> Result<(), AllocError> {
        // TODO: Kunit #[test]s do not support `cfg` yet,
        // so we add it here in the body.
        #[cfg(not(CONFIG_RUST_BITMAP_HARDENED))]
        {
            let mut b = BitmapVec::new(128, GFP_KERNEL)?;
            b.set_bit(2048);
            b.set_bit_atomic(2048);
            b.clear_bit(2048);
            b.clear_bit_atomic(2048);
            assert_eq!(None, b.next_bit(2048));
            assert_eq!(None, b.next_zero_bit(2048));
            assert_eq!(None, b.last_bit());
        }
        Ok(())
    }

    // TODO: uncomment once kunit supports [should_panic] and `cfg`.
    // #[cfg(CONFIG_RUST_BITMAP_HARDENED)]
    // #[test]
    // #[should_panic]
    // fn owned_bitmap_out_of_bounds() -> Result<(), AllocError> {
    //     let mut b = BitmapVec::new(128, GFP_KERNEL)?;
    //
    //     b.set_bit(2048);
    // }

    #[test]
    fn bitmap_copy_and_extend() -> Result<(), AllocError> {
        let mut long_bitmap = BitmapVec::new(256, GFP_KERNEL)?;

        long_bitmap.set_bit(3);
        long_bitmap.set_bit(200);

        let mut short_bitmap = BitmapVec::new(32, GFP_KERNEL)?;

        short_bitmap.set_bit(17);

        long_bitmap.copy_and_extend(&short_bitmap);

        // Previous bits have been cleared.
        assert_eq!(Some(17), long_bitmap.next_bit(0));
        assert_eq!(Some(17), long_bitmap.last_bit());
        Ok(())
    }
}