// SPDX-License-Identifier: GPL-2.0

//! Maple trees.
//!
//! C header: [`include/linux/maple_tree.h`](srctree/include/linux/maple_tree.h)
//!
//! Reference: <https://docs.kernel.org/core-api/maple_tree.html>

use core::{
    marker::PhantomData,
    ops::{Bound, RangeBounds},
    ptr,
};

use kernel::{
    alloc::Flags,
    error::to_result,
    prelude::*,
    types::{ForeignOwnable, Opaque},
};

/// A maple tree optimized for storing non-overlapping ranges.
///
/// # Invariants
///
/// Each range in the maple tree owns an instance of `T`.
#[pin_data(PinnedDrop)]
#[repr(transparent)]
pub struct MapleTree<T: ForeignOwnable> {
    #[pin]
    tree: Opaque<bindings::maple_tree>,
    _p: PhantomData<T>,
}

/// A maple tree with `MT_FLAGS_ALLOC_RANGE` set.
///
/// All methods on [`MapleTree`] are also accessible on this type.
#[pin_data]
#[repr(transparent)]
pub struct MapleTreeAlloc<T: ForeignOwnable> {
    #[pin]
    tree: MapleTree<T>,
}

// Make MapleTree methods usable on MapleTreeAlloc.
impl<T: ForeignOwnable> core::ops::Deref for MapleTreeAlloc<T> {
    type Target = MapleTree<T>;

    #[inline]
    fn deref(&self) -> &MapleTree<T> {
        &self.tree
    }
}

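// Convert the usual Rust range types into the inclusive `(first, last)` pair expected by the
// maple tree API: e.g. `100..500` becomes `(100, 499)` and `..` becomes `(0, usize::MAX)`.
// Returns `None` if the range would be empty, such as `72..72`.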
#[inline]
fn to_maple_range(range: impl RangeBounds<usize>) -> Option<(usize, usize)> {
    let first = match range.start_bound() {
        Bound::Included(start) => *start,
        Bound::Excluded(start) => start.checked_add(1)?,
        Bound::Unbounded => 0,
    };

    let last = match range.end_bound() {
        Bound::Included(end) => *end,
        Bound::Excluded(end) => end.checked_sub(1)?,
        Bound::Unbounded => usize::MAX,
    };

    if last < first {
        return None;
    }

    Some((first, last))
}

impl<T: ForeignOwnable> MapleTree<T> {
    /// Create a new maple tree.
    ///
    /// The tree will use the regular implementation with a higher branching factor, rather than
    /// the allocation tree.
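    ///
    /// # Examples
    ///
    /// A minimal construction sketch, mirroring the setup used by the other examples in this
    /// file; `KBox<i32>` is just an illustrative value type.
    ///
    /// ```
    /// use kernel::maple_tree::MapleTree;
    ///
    /// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
    ///
    /// // The new tree starts out empty.
    /// assert!(tree.erase(0).is_none());
    /// # Ok::<_, Error>(())
    /// ```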
    #[inline]
    pub fn new() -> impl PinInit<Self> {
        pin_init!(MapleTree {
            // SAFETY: This initializes a maple tree into a pinned slot. The maple tree will be
            // destroyed in Drop before the memory location becomes invalid.
            tree <- Opaque::ffi_init(|slot| unsafe { bindings::mt_init_flags(slot, 0) }),
            _p: PhantomData,
        })
    }

    /// Insert the value at the given index.
    ///
    /// # Errors
    ///
    /// If the maple tree already contains a range using the given index, then this call will
    /// return an [`InsertErrorKind::Occupied`]. It may also fail if memory allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::maple_tree::{InsertErrorKind, MapleTree};
    ///
    /// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let ten = KBox::new(10, GFP_KERNEL)?;
    /// let twenty = KBox::new(20, GFP_KERNEL)?;
    /// let the_answer = KBox::new(42, GFP_KERNEL)?;
    ///
    /// // These calls will succeed.
    /// tree.insert(100, ten, GFP_KERNEL)?;
    /// tree.insert(101, twenty, GFP_KERNEL)?;
    ///
    /// // This will fail because the index is already in use.
    /// assert_eq!(
    ///     tree.insert(100, the_answer, GFP_KERNEL).unwrap_err().cause,
    ///     InsertErrorKind::Occupied,
    /// );
    /// # Ok::<_, Error>(())
    /// ```
    #[inline]
    pub fn insert(&self, index: usize, value: T, gfp: Flags) -> Result<(), InsertError<T>> {
        self.insert_range(index..=index, value, gfp)
    }

    /// Insert a value into the specified range, failing on overlap.
    ///
    /// This accepts the usual types of Rust ranges using the `..` and `..=` syntax for exclusive
    /// and inclusive ranges respectively. The range must not be empty, and must not overlap with
    /// any existing range.
    ///
    /// # Errors
    ///
    /// If the maple tree already contains an overlapping range, then this call will return an
    /// [`InsertErrorKind::Occupied`]. It may also fail if memory allocation fails or if the
    /// requested range is invalid (e.g. empty).
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::maple_tree::{InsertErrorKind, MapleTree};
    ///
    /// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let ten = KBox::new(10, GFP_KERNEL)?;
    /// let twenty = KBox::new(20, GFP_KERNEL)?;
    /// let the_answer = KBox::new(42, GFP_KERNEL)?;
    /// let hundred = KBox::new(100, GFP_KERNEL)?;
    ///
    /// // Insert the value 10 at the indices 100 to 499.
    /// tree.insert_range(100..500, ten, GFP_KERNEL)?;
    ///
    /// // Insert the value 20 at the indices 500 to 1000.
    /// tree.insert_range(500..=1000, twenty, GFP_KERNEL)?;
    ///
    /// // This will fail due to overlap with the previous range on index 1000.
    /// assert_eq!(
    ///     tree.insert_range(1000..1200, the_answer, GFP_KERNEL).unwrap_err().cause,
    ///     InsertErrorKind::Occupied,
    /// );
    ///
    /// // When using .. to specify the range, you must be careful to ensure that the range is
    /// // non-empty.
    /// assert_eq!(
    ///     tree.insert_range(72..72, hundred, GFP_KERNEL).unwrap_err().cause,
    ///     InsertErrorKind::InvalidRequest,
    /// );
    /// # Ok::<_, Error>(())
    /// ```
    pub fn insert_range<R>(&self, range: R, value: T, gfp: Flags) -> Result<(), InsertError<T>>
    where
        R: RangeBounds<usize>,
    {
        let Some((first, last)) = to_maple_range(range) else {
            return Err(InsertError {
                value,
                cause: InsertErrorKind::InvalidRequest,
            });
        };

        let ptr = T::into_foreign(value);

        // SAFETY: The tree is valid, and we are passing a pointer to an owned instance of `T`.
        let res = to_result(unsafe {
            bindings::mtree_insert_range(self.tree.get(), first, last, ptr, gfp.as_raw())
        });

        if let Err(err) = res {
            // SAFETY: As `mtree_insert_range` failed, it is safe to take back ownership.
            let value = unsafe { T::from_foreign(ptr) };

            let cause = if err == ENOMEM {
                InsertErrorKind::AllocError(kernel::alloc::AllocError)
            } else if err == EEXIST {
                InsertErrorKind::Occupied
            } else {
                InsertErrorKind::InvalidRequest
            };
            Err(InsertError { value, cause })
        } else {
            Ok(())
        }
    }

    /// Erase the range containing the given index.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::maple_tree::MapleTree;
    ///
    /// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let ten = KBox::new(10, GFP_KERNEL)?;
    /// let twenty = KBox::new(20, GFP_KERNEL)?;
    ///
    /// tree.insert_range(100..500, ten, GFP_KERNEL)?;
    /// tree.insert(67, twenty, GFP_KERNEL)?;
    ///
    /// assert_eq!(tree.erase(67).map(|v| *v), Some(20));
    /// assert_eq!(tree.erase(275).map(|v| *v), Some(10));
    ///
    /// // The previous call erased the entire range, not just index 275.
    /// assert!(tree.erase(127).is_none());
    /// # Ok::<_, Error>(())
    /// ```
    #[inline]
    pub fn erase(&self, index: usize) -> Option<T> {
        // SAFETY: `self.tree` contains a valid maple tree.
        let ret = unsafe { bindings::mtree_erase(self.tree.get(), index) };

        // SAFETY: If the pointer is not null, then we took ownership of a valid instance of `T`
        // from the tree.
        unsafe { T::try_from_foreign(ret) }
    }

    /// Lock the internal spinlock.
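    ///
    /// # Examples
    ///
    /// A small sketch of the guard's scope: the spinlock is held until the returned
    /// [`MapleGuard`] is dropped.
    ///
    /// ```
    /// use kernel::maple_tree::MapleTree;
    ///
    /// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let mut guard = tree.lock();
    /// assert!(guard.load(0).is_none());
    /// drop(guard); // Unlock.
    /// # Ok::<_, Error>(())
    /// ```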
    #[inline]
    pub fn lock(&self) -> MapleGuard<'_, T> {
        // SAFETY: It's safe to lock the spinlock in a maple tree.
        unsafe { bindings::spin_lock(self.ma_lock()) };

        // INVARIANT: We just took the spinlock.
        MapleGuard(self)
    }

    #[inline]
    fn ma_lock(&self) -> *mut bindings::spinlock_t {
        // SAFETY: This pointer offset operation stays in-bounds.
        let lock_ptr = unsafe { &raw mut (*self.tree.get()).__bindgen_anon_1.ma_lock };
        lock_ptr.cast()
    }

    /// Free all `T` instances in this tree.
    ///
    /// # Safety
    ///
    /// This frees Rust data referenced by the maple tree without removing it from the maple tree,
    /// leaving it in an invalid state. The caller must ensure that this invalid state cannot be
    /// observed by the end-user.
    unsafe fn free_all_entries(self: Pin<&mut Self>) {
        // SAFETY: The caller provides exclusive access to the entire maple tree, so we have
        // exclusive access to the entire maple tree despite not holding the lock.
        let mut ma_state = unsafe { MaState::new_raw(self.into_ref().get_ref(), 0, usize::MAX) };

        loop {
            // This uses the raw accessor because we're destroying pointers without removing them
            // from the maple tree, which is only valid because this is the destructor.
            //
            // Take the rcu lock because mas_find_raw() requires that you hold either the spinlock
            // or the rcu read lock. This is only really required if memory reclaim might
            // reallocate entries in the tree, as we otherwise have exclusive access. That feature
            // doesn't exist yet, so for now, taking the rcu lock only serves the purpose of
            // silencing lockdep.
            let ptr = {
                let _rcu = kernel::sync::rcu::Guard::new();
                ma_state.mas_find_raw(usize::MAX)
            };
            if ptr.is_null() {
                break;
            }
            // SAFETY: By the type invariants, this pointer references a valid value of type `T`.
            // By the safety requirements, it is okay to free it without removing it from the maple
            // tree.
            drop(unsafe { T::from_foreign(ptr) });
        }
    }
}

#[pinned_drop]
impl<T: ForeignOwnable> PinnedDrop for MapleTree<T> {
    #[inline]
    fn drop(mut self: Pin<&mut Self>) {
        // We only iterate the tree if the Rust value has a destructor.
        if core::mem::needs_drop::<T>() {
            // SAFETY: Other than the below `mtree_destroy` call, the tree will not be accessed
            // after this call.
            unsafe { self.as_mut().free_all_entries() };
        }

        // SAFETY: The tree is valid, and will not be accessed after this call.
        unsafe { bindings::mtree_destroy(self.tree.get()) };
    }
}

/// A reference to a [`MapleTree`] that owns the inner lock.
///
/// # Invariants
///
/// This guard owns the inner spinlock.
#[must_use = "if unused, the lock will be immediately unlocked"]
pub struct MapleGuard<'tree, T: ForeignOwnable>(&'tree MapleTree<T>);

impl<'tree, T: ForeignOwnable> Drop for MapleGuard<'tree, T> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: By the type invariants, we hold this spinlock.
        unsafe { bindings::spin_unlock(self.0.ma_lock()) };
    }
}

impl<'tree, T: ForeignOwnable> MapleGuard<'tree, T> {
    /// Create a [`MaState`] protected by this lock guard.
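    ///
    /// See [`MaState::find`] for an example that iterates a tree through an `MaState` obtained
    /// from this method.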
    pub fn ma_state(&mut self, first: usize, end: usize) -> MaState<'_, T> {
        // SAFETY: The `MaState` borrows this `MapleGuard`, so it can also borrow the
        // `MapleGuard`'s read/write permissions to the maple tree.
        unsafe { MaState::new_raw(self.0, first, end) }
    }

    /// Load the value at the given index.
    ///
    /// # Examples
    ///
    /// Read the value while holding the spinlock.
    ///
    /// ```
    /// use kernel::maple_tree::MapleTree;
    ///
    /// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let ten = KBox::new(10, GFP_KERNEL)?;
    /// let twenty = KBox::new(20, GFP_KERNEL)?;
    /// tree.insert(100, ten, GFP_KERNEL)?;
    /// tree.insert(200, twenty, GFP_KERNEL)?;
    ///
    /// let mut lock = tree.lock();
    /// assert_eq!(lock.load(100).map(|v| *v), Some(10));
    /// assert_eq!(lock.load(200).map(|v| *v), Some(20));
    /// assert_eq!(lock.load(300).map(|v| *v), None);
    /// # Ok::<_, Error>(())
    /// ```
    ///
    /// Increment the refcount under the lock to keep the value alive afterwards.
    ///
    /// ```
    /// use kernel::maple_tree::MapleTree;
    /// use kernel::sync::Arc;
    ///
    /// let tree = KBox::pin_init(MapleTree::<Arc<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let ten = Arc::new(10, GFP_KERNEL)?;
    /// let twenty = Arc::new(20, GFP_KERNEL)?;
    /// tree.insert(100, ten, GFP_KERNEL)?;
    /// tree.insert(200, twenty, GFP_KERNEL)?;
    ///
    /// // Briefly take the lock to increment the refcount.
    /// let value = tree.lock().load(100).map(Arc::from);
    ///
    /// // At this point, another thread might remove the value.
    /// tree.erase(100);
    ///
    /// // But we can still access it because we took a refcount.
    /// assert_eq!(value.map(|v| *v), Some(10));
    /// # Ok::<_, Error>(())
    /// ```
    #[inline]
    pub fn load(&mut self, index: usize) -> Option<T::BorrowedMut<'_>> {
        // SAFETY: `self.tree` contains a valid maple tree.
        let ret = unsafe { bindings::mtree_load(self.0.tree.get(), index) };
        if ret.is_null() {
            return None;
        }

        // SAFETY: If the pointer is not null, then it references a valid instance of `T`. It is
        // safe to borrow the instance mutably because the signature of this function enforces that
        // the mutable borrow is not used after the spinlock is dropped.
        Some(unsafe { T::borrow_mut(ret) })
    }
}

impl<T: ForeignOwnable> MapleTreeAlloc<T> {
    /// Create a new allocation tree.
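    ///
    /// # Examples
    ///
    /// A minimal construction sketch; see [`alloc_range`](Self::alloc_range) for a fuller
    /// example.
    ///
    /// ```
    /// use kernel::maple_tree::MapleTreeAlloc;
    ///
    /// let tree = KBox::pin_init(MapleTreeAlloc::<KBox<i32>>::new(), GFP_KERNEL)?;
    ///
    /// // The first allocation in an empty tree starts at index 0.
    /// let ten = KBox::new(10, GFP_KERNEL)?;
    /// assert_eq!(tree.alloc_range(10, ten, .., GFP_KERNEL)?, 0);
    /// # Ok::<_, Error>(())
    /// ```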
    pub fn new() -> impl PinInit<Self> {
        let tree = pin_init!(MapleTree {
            // SAFETY: This initializes a maple tree into a pinned slot. The maple tree will be
            // destroyed in Drop before the memory location becomes invalid.
            tree <- Opaque::ffi_init(|slot| unsafe {
                bindings::mt_init_flags(slot, bindings::MT_FLAGS_ALLOC_RANGE)
            }),
            _p: PhantomData,
        });

        pin_init!(MapleTreeAlloc { tree <- tree })
    }

    /// Insert an entry with the given size somewhere in the given range.
    ///
    /// The maple tree will search for a location in the given range where there is space to insert
    /// the new range. If there is not enough available space, then an error will be returned.
    ///
    /// The index of the new range is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::maple_tree::{MapleTreeAlloc, AllocErrorKind};
    ///
    /// let tree = KBox::pin_init(MapleTreeAlloc::<KBox<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let ten = KBox::new(10, GFP_KERNEL)?;
    /// let twenty = KBox::new(20, GFP_KERNEL)?;
    /// let thirty = KBox::new(30, GFP_KERNEL)?;
    /// let hundred = KBox::new(100, GFP_KERNEL)?;
    ///
    /// // Allocate three ranges.
    /// let idx1 = tree.alloc_range(100, ten, ..1000, GFP_KERNEL)?;
    /// let idx2 = tree.alloc_range(100, twenty, ..1000, GFP_KERNEL)?;
    /// let idx3 = tree.alloc_range(100, thirty, ..1000, GFP_KERNEL)?;
    ///
    /// assert_eq!(idx1, 0);
    /// assert_eq!(idx2, 100);
    /// assert_eq!(idx3, 200);
    ///
    /// // This will fail because the remaining space is too small.
    /// assert_eq!(
    ///     tree.alloc_range(800, hundred, ..1000, GFP_KERNEL).unwrap_err().cause,
    ///     AllocErrorKind::Busy,
    /// );
    /// # Ok::<_, Error>(())
    /// ```
    pub fn alloc_range<R>(
        &self,
        size: usize,
        value: T,
        range: R,
        gfp: Flags,
    ) -> Result<usize, AllocError<T>>
    where
        R: RangeBounds<usize>,
    {
        let Some((min, max)) = to_maple_range(range) else {
            return Err(AllocError {
                value,
                cause: AllocErrorKind::InvalidRequest,
            });
        };

        let ptr = T::into_foreign(value);
        let mut index = 0;

        // SAFETY: The tree is valid, and we are passing a pointer to an owned instance of `T`.
        let res = to_result(unsafe {
            bindings::mtree_alloc_range(
                self.tree.tree.get(),
                &mut index,
                ptr,
                size,
                min,
                max,
                gfp.as_raw(),
            )
        });

        if let Err(err) = res {
            // SAFETY: As `mtree_alloc_range` failed, it is safe to take back ownership.
            let value = unsafe { T::from_foreign(ptr) };

            let cause = if err == ENOMEM {
                AllocErrorKind::AllocError(kernel::alloc::AllocError)
            } else if err == EBUSY {
                AllocErrorKind::Busy
            } else {
                AllocErrorKind::InvalidRequest
            };
            Err(AllocError { value, cause })
        } else {
            Ok(index)
        }
    }
}

/// A helper type used for navigating a [`MapleTree`].
///
/// # Invariants
///
/// For the duration of `'tree`:
///
/// * The `ma_state` references a valid `MapleTree<T>`.
/// * The `ma_state` has read/write access to the tree.
pub struct MaState<'tree, T: ForeignOwnable> {
    state: bindings::ma_state,
    _phantom: PhantomData<&'tree mut MapleTree<T>>,
}

impl<'tree, T: ForeignOwnable> MaState<'tree, T> {
    /// Initialize a new `MaState` with the given tree.
    ///
    /// # Safety
    ///
    /// The caller must ensure that this `MaState` has read/write access to the maple tree.
    #[inline]
    unsafe fn new_raw(mt: &'tree MapleTree<T>, first: usize, end: usize) -> Self {
        // INVARIANT:
        // * Having a reference ensures that the `MapleTree<T>` is valid for `'tree`.
        // * The caller ensures that we have read/write access.
        Self {
            state: bindings::ma_state {
                tree: mt.tree.get(),
                index: first,
                last: end,
                node: ptr::null_mut(),
                status: bindings::maple_status_ma_start,
                min: 0,
                max: usize::MAX,
                alloc: ptr::null_mut(),
                mas_flags: 0,
                store_type: bindings::store_type_wr_invalid,
                ..Default::default()
            },
            _phantom: PhantomData,
        }
    }

    #[inline]
    fn as_raw(&mut self) -> *mut bindings::ma_state {
        &raw mut self.state
    }

    #[inline]
    fn mas_find_raw(&mut self, max: usize) -> *mut c_void {
        // SAFETY: By the type invariants, the `ma_state` is active and we have read/write access
        // to the tree.
        unsafe { bindings::mas_find(self.as_raw(), max) }
    }

    /// Find the next entry in the maple tree.
    ///
    /// # Examples
    ///
    /// Iterate the maple tree.
    ///
    /// ```
    /// use kernel::maple_tree::MapleTree;
    /// use kernel::sync::Arc;
    ///
    /// let tree = KBox::pin_init(MapleTree::<Arc<i32>>::new(), GFP_KERNEL)?;
    ///
    /// let ten = Arc::new(10, GFP_KERNEL)?;
    /// let twenty = Arc::new(20, GFP_KERNEL)?;
    /// tree.insert(100, ten, GFP_KERNEL)?;
    /// tree.insert(200, twenty, GFP_KERNEL)?;
    ///
    /// let mut ma_lock = tree.lock();
    /// let mut iter = ma_lock.ma_state(0, usize::MAX);
    ///
    /// assert_eq!(iter.find(usize::MAX).map(|v| *v), Some(10));
    /// assert_eq!(iter.find(usize::MAX).map(|v| *v), Some(20));
    /// assert!(iter.find(usize::MAX).is_none());
    /// # Ok::<_, Error>(())
    /// ```
    #[inline]
    pub fn find(&mut self, max: usize) -> Option<T::BorrowedMut<'_>> {
        let ret = self.mas_find_raw(max);
        if ret.is_null() {
            return None;
        }

        // SAFETY: If the pointer is not null, then it references a valid instance of `T`. It's
        // safe to access it mutably as the returned reference borrows this `MaState`, and the
        // `MaState` has read/write access to the maple tree.
        Some(unsafe { T::borrow_mut(ret) })
    }
}

/// Error type for failure to insert a new value.
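///
/// On failure, ownership of the value that could not be inserted is handed back through the
/// `value` field, as this sketch illustrates:
///
/// ```
/// use kernel::maple_tree::MapleTree;
///
/// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
///
/// let ten = KBox::new(10, GFP_KERNEL)?;
/// let ten_again = KBox::new(10, GFP_KERNEL)?;
/// tree.insert(100, ten, GFP_KERNEL)?;
///
/// // The failed insertion returns ownership of `ten_again`.
/// let err = tree.insert(100, ten_again, GFP_KERNEL).unwrap_err();
/// assert_eq!(*err.value, 10);
/// # Ok::<_, Error>(())
/// ```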
pub struct InsertError<T> {
    /// The value that could not be inserted.
    pub value: T,
    /// The reason for the failure to insert.
    pub cause: InsertErrorKind,
}

/// The reason for the failure to insert.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub enum InsertErrorKind {
    /// There is already a value in the requested range.
    Occupied,
    /// Failure to allocate memory.
    AllocError(kernel::alloc::AllocError),
    /// The insertion request was invalid.
    InvalidRequest,
}

impl From<InsertErrorKind> for Error {
    #[inline]
    fn from(kind: InsertErrorKind) -> Error {
        match kind {
            InsertErrorKind::Occupied => EEXIST,
            InsertErrorKind::AllocError(kernel::alloc::AllocError) => ENOMEM,
            InsertErrorKind::InvalidRequest => EINVAL,
        }
    }
}

impl<T> From<InsertError<T>> for Error {
    #[inline]
    fn from(insert_err: InsertError<T>) -> Error {
        Error::from(insert_err.cause)
    }
}

/// Error type for failure to allocate a range for a new value.
pub struct AllocError<T> {
    /// The value that could not be inserted.
    pub value: T,
    /// The reason for the failure to allocate.
    pub cause: AllocErrorKind,
}

/// The reason for the failure to allocate.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub enum AllocErrorKind {
    /// There is not enough space for the requested allocation.
    Busy,
    /// Failure to allocate memory.
    AllocError(kernel::alloc::AllocError),
    /// The allocation request was invalid.
    InvalidRequest,
}

impl From<AllocErrorKind> for Error {
    #[inline]
    fn from(kind: AllocErrorKind) -> Error {
        match kind {
            AllocErrorKind::Busy => EBUSY,
            AllocErrorKind::AllocError(kernel::alloc::AllocError) => ENOMEM,
            AllocErrorKind::InvalidRequest => EINVAL,
        }
    }
}

impl<T> From<AllocError<T>> for Error {
    #[inline]
    fn from(alloc_err: AllocError<T>) -> Error {
        Error::from(alloc_err.cause)
    }
}