Diffstat (limited to 'rust/kernel')
| -rw-r--r-- | rust/kernel/alloc.rs | 73 |
| -rw-r--r-- | rust/kernel/alloc/allocator.rs (renamed from rust/kernel/allocator.rs) | 19 |
| -rw-r--r-- | rust/kernel/alloc/box_ext.rs | 56 |
| -rw-r--r-- | rust/kernel/alloc/vec_ext.rs | 182 |
| -rw-r--r-- | rust/kernel/error.rs | 14 |
| -rw-r--r-- | rust/kernel/init.rs | 74 |
| -rw-r--r-- | rust/kernel/init/macros.rs | 47 |
| -rw-r--r-- | rust/kernel/lib.rs | 15 |
| -rw-r--r-- | rust/kernel/net/phy.rs | 6 |
| -rw-r--r-- | rust/kernel/prelude.rs | 2 |
| -rw-r--r-- | rust/kernel/print.rs | 5 |
| -rw-r--r-- | rust/kernel/std_vendor.rs | 7 |
| -rw-r--r-- | rust/kernel/str.rs | 98 |
| -rw-r--r-- | rust/kernel/sync.rs | 6 |
| -rw-r--r-- | rust/kernel/sync/arc.rs | 189 |
| -rw-r--r-- | rust/kernel/sync/condvar.rs | 3 |
| -rw-r--r-- | rust/kernel/sync/lock.rs | 2 |
| -rw-r--r-- | rust/kernel/sync/lock/mutex.rs | 4 |
| -rw-r--r-- | rust/kernel/sync/lock/spinlock.rs | 4 |
| -rw-r--r-- | rust/kernel/task.rs | 2 |
| -rw-r--r-- | rust/kernel/time.rs | 63 |
| -rw-r--r-- | rust/kernel/types.rs | 6 |
| -rw-r--r-- | rust/kernel/workqueue.rs | 54 |
23 files changed, 754 insertions, 177 deletions
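The unifying change in the patch below is that the `kernel` crate's allocation entry points now take an explicit allocation-flags argument (`GFP_KERNEL`, `GFP_ATOMIC`, ...) through the new `kernel::alloc` module, instead of the previous `try_new`-style constructors. The sketch here is not part of the patch; it only mirrors the doctests visible in the diff and assumes the kernel crate's build environment, where the infallible `alloc` constructors are unavailable and the `BoxExt`/`VecExt` extension traits plus the `GFP_*` flag constants come from the prelude:

```rust
use kernel::prelude::*; // re-exports BoxExt, VecExt and the GFP_* flag constants
use kernel::sync::Arc;

fn flags_example() -> Result<(), Error> {
    // Fallible Box allocation; the flags argument replaces `Box::try_new(...)`.
    let b = Box::new(42u32, GFP_KERNEL)?;

    // Vec growth is fallible and flag-aware via `VecExt`.
    let mut v = Vec::new();
    v.push(*b, GFP_KERNEL)?;
    v.extend_from_slice(&[1, 2, 3], GFP_KERNEL)?;

    // Reference-counted allocation takes flags too (`Arc::try_new` becomes `Arc::new`).
    let shared = Arc::new(*b, GFP_KERNEL)?;
    assert_eq!(*shared, 42);

    Ok(())
}
```

In atomic context the same calls would pass `GFP_ATOMIC` (or `GFP_NOWAIT`), and `__GFP_ZERO` can be OR'd in, as documented by the `flags` module added in rust/kernel/alloc.rs below.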
| diff --git a/rust/kernel/alloc.rs b/rust/kernel/alloc.rs new file mode 100644 index 000000000000..531b5e471cb1 --- /dev/null +++ b/rust/kernel/alloc.rs @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Extensions to the [`alloc`] crate. + +#[cfg(not(test))] +#[cfg(not(testlib))] +mod allocator; +pub mod box_ext; +pub mod vec_ext; + +/// Indicates an allocation error. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub struct AllocError; + +/// Flags to be used when allocating memory. +/// +/// They can be combined with the operators `|`, `&`, and `!`. +/// +/// Values can be used from the [`flags`] module. +#[derive(Clone, Copy)] +pub struct Flags(u32); + +impl core::ops::BitOr for Flags { +    type Output = Self; +    fn bitor(self, rhs: Self) -> Self::Output { +        Self(self.0 | rhs.0) +    } +} + +impl core::ops::BitAnd for Flags { +    type Output = Self; +    fn bitand(self, rhs: Self) -> Self::Output { +        Self(self.0 & rhs.0) +    } +} + +impl core::ops::Not for Flags { +    type Output = Self; +    fn not(self) -> Self::Output { +        Self(!self.0) +    } +} + +/// Allocation flags. +/// +/// These are meant to be used in functions that can allocate memory. +pub mod flags { +    use super::Flags; + +    /// Zeroes out the allocated memory. +    /// +    /// This is normally or'd with other flags. +    pub const __GFP_ZERO: Flags = Flags(bindings::__GFP_ZERO); + +    /// Users can not sleep and need the allocation to succeed. +    /// +    /// A lower watermark is applied to allow access to "atomic reserves". The current +    /// implementation doesn't support NMI and few other strict non-preemptive contexts (e.g. +    /// raw_spin_lock). The same applies to [`GFP_NOWAIT`]. +    pub const GFP_ATOMIC: Flags = Flags(bindings::GFP_ATOMIC); + +    /// Typical for kernel-internal allocations. The caller requires ZONE_NORMAL or a lower zone +    /// for direct access but can direct reclaim. +    pub const GFP_KERNEL: Flags = Flags(bindings::GFP_KERNEL); + +    /// The same as [`GFP_KERNEL`], except the allocation is accounted to kmemcg. +    pub const GFP_KERNEL_ACCOUNT: Flags = Flags(bindings::GFP_KERNEL_ACCOUNT); + +    /// Ror kernel allocations that should not stall for direct reclaim, start physical IO or +    /// use any filesystem callback.  It is very likely to fail to allocate memory, even for very +    /// small allocations. +    pub const GFP_NOWAIT: Flags = Flags(bindings::GFP_NOWAIT); +} diff --git a/rust/kernel/allocator.rs b/rust/kernel/alloc/allocator.rs index 01ad139e19bc..229642960cd1 100644 --- a/rust/kernel/allocator.rs +++ b/rust/kernel/alloc/allocator.rs @@ -2,11 +2,10 @@  //! Allocator support. +use super::{flags::*, Flags};  use core::alloc::{GlobalAlloc, Layout};  use core::ptr; -use crate::bindings; -  struct KernelAllocator;  /// Calls `krealloc` with a proper size to alloc a new object aligned to `new_layout`'s alignment. @@ -15,7 +14,7 @@ struct KernelAllocator;  ///  /// - `ptr` can be either null or a pointer which has been allocated by this allocator.  /// - `new_layout` must have a non-zero size. -unsafe fn krealloc_aligned(ptr: *mut u8, new_layout: Layout, flags: bindings::gfp_t) -> *mut u8 { +pub(crate) unsafe fn krealloc_aligned(ptr: *mut u8, new_layout: Layout, flags: Flags) -> *mut u8 {      // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.      
let layout = new_layout.pad_to_align(); @@ -36,14 +35,14 @@ unsafe fn krealloc_aligned(ptr: *mut u8, new_layout: Layout, flags: bindings::gf      //   function safety requirement.      // - `size` is greater than 0 since it's either a `layout.size()` (which cannot be zero      //   according to the function safety requirement) or a result from `next_power_of_two()`. -    unsafe { bindings::krealloc(ptr as *const core::ffi::c_void, size, flags) as *mut u8 } +    unsafe { bindings::krealloc(ptr as *const core::ffi::c_void, size, flags.0) as *mut u8 }  }  unsafe impl GlobalAlloc for KernelAllocator {      unsafe fn alloc(&self, layout: Layout) -> *mut u8 {          // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety          // requirement. -        unsafe { krealloc_aligned(ptr::null_mut(), layout, bindings::GFP_KERNEL) } +        unsafe { krealloc_aligned(ptr::null_mut(), layout, GFP_KERNEL) }      }      unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { @@ -64,19 +63,13 @@ unsafe impl GlobalAlloc for KernelAllocator {          //   requirement.          // - the size of `layout` is not zero because `new_size` is not zero by the function safety          //   requirement. -        unsafe { krealloc_aligned(ptr, layout, bindings::GFP_KERNEL) } +        unsafe { krealloc_aligned(ptr, layout, GFP_KERNEL) }      }      unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {          // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety          // requirement. -        unsafe { -            krealloc_aligned( -                ptr::null_mut(), -                layout, -                bindings::GFP_KERNEL | bindings::__GFP_ZERO, -            ) -        } +        unsafe { krealloc_aligned(ptr::null_mut(), layout, GFP_KERNEL | __GFP_ZERO) }      }  } diff --git a/rust/kernel/alloc/box_ext.rs b/rust/kernel/alloc/box_ext.rs new file mode 100644 index 000000000000..829cb1c1cf9e --- /dev/null +++ b/rust/kernel/alloc/box_ext.rs @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Extensions to [`Box`] for fallible allocations. + +use super::{AllocError, Flags}; +use alloc::boxed::Box; +use core::mem::MaybeUninit; + +/// Extensions to [`Box`]. +pub trait BoxExt<T>: Sized { +    /// Allocates a new box. +    /// +    /// The allocation may fail, in which case an error is returned. +    fn new(x: T, flags: Flags) -> Result<Self, AllocError>; + +    /// Allocates a new uninitialised box. +    /// +    /// The allocation may fail, in which case an error is returned. +    fn new_uninit(flags: Flags) -> Result<Box<MaybeUninit<T>>, AllocError>; +} + +impl<T> BoxExt<T> for Box<T> { +    fn new(x: T, flags: Flags) -> Result<Self, AllocError> { +        let b = <Self as BoxExt<_>>::new_uninit(flags)?; +        Ok(Box::write(b, x)) +    } + +    #[cfg(any(test, testlib))] +    fn new_uninit(_flags: Flags) -> Result<Box<MaybeUninit<T>>, AllocError> { +        Ok(Box::new_uninit()) +    } + +    #[cfg(not(any(test, testlib)))] +    fn new_uninit(flags: Flags) -> Result<Box<MaybeUninit<T>>, AllocError> { +        let ptr = if core::mem::size_of::<MaybeUninit<T>>() == 0 { +            core::ptr::NonNull::<_>::dangling().as_ptr() +        } else { +            let layout = core::alloc::Layout::new::<MaybeUninit<T>>(); + +            // SAFETY: Memory is being allocated (first arg is null). The only other source of +            // safety issues is sleeping on atomic context, which is addressed by klint. 
Lastly, +            // the type is not a SZT (checked above). +            let ptr = +                unsafe { super::allocator::krealloc_aligned(core::ptr::null_mut(), layout, flags) }; +            if ptr.is_null() { +                return Err(AllocError); +            } + +            ptr.cast::<MaybeUninit<T>>() +        }; + +        // SAFETY: For non-zero-sized types, we allocate above using the global allocator. For +        // zero-sized types, we use `NonNull::dangling`. +        Ok(unsafe { Box::from_raw(ptr) }) +    } +} diff --git a/rust/kernel/alloc/vec_ext.rs b/rust/kernel/alloc/vec_ext.rs new file mode 100644 index 000000000000..e9a81052728a --- /dev/null +++ b/rust/kernel/alloc/vec_ext.rs @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Extensions to [`Vec`] for fallible allocations. + +use super::{AllocError, Flags}; +use alloc::vec::Vec; +use core::ptr; + +/// Extensions to [`Vec`]. +pub trait VecExt<T>: Sized { +    /// Creates a new [`Vec`] instance with at least the given capacity. +    /// +    /// # Examples +    /// +    /// ``` +    /// let v = Vec::<u32>::with_capacity(20, GFP_KERNEL)?; +    /// +    /// assert!(v.capacity() >= 20); +    /// # Ok::<(), Error>(()) +    /// ``` +    fn with_capacity(capacity: usize, flags: Flags) -> Result<Self, AllocError>; + +    /// Appends an element to the back of the [`Vec`] instance. +    /// +    /// # Examples +    /// +    /// ``` +    /// let mut v = Vec::new(); +    /// v.push(1, GFP_KERNEL)?; +    /// assert_eq!(&v, &[1]); +    /// +    /// v.push(2, GFP_KERNEL)?; +    /// assert_eq!(&v, &[1, 2]); +    /// # Ok::<(), Error>(()) +    /// ``` +    fn push(&mut self, v: T, flags: Flags) -> Result<(), AllocError>; + +    /// Pushes clones of the elements of slice into the [`Vec`] instance. +    /// +    /// # Examples +    /// +    /// ``` +    /// let mut v = Vec::new(); +    /// v.push(1, GFP_KERNEL)?; +    /// +    /// v.extend_from_slice(&[20, 30, 40], GFP_KERNEL)?; +    /// assert_eq!(&v, &[1, 20, 30, 40]); +    /// +    /// v.extend_from_slice(&[50, 60], GFP_KERNEL)?; +    /// assert_eq!(&v, &[1, 20, 30, 40, 50, 60]); +    /// # Ok::<(), Error>(()) +    /// ``` +    fn extend_from_slice(&mut self, other: &[T], flags: Flags) -> Result<(), AllocError> +    where +        T: Clone; + +    /// Ensures that the capacity exceeds the length by at least `additional` elements. +    /// +    /// # Examples +    /// +    /// ``` +    /// let mut v = Vec::new(); +    /// v.push(1, GFP_KERNEL)?; +    /// +    /// v.reserve(10, GFP_KERNEL)?; +    /// let cap = v.capacity(); +    /// assert!(cap >= 10); +    /// +    /// v.reserve(10, GFP_KERNEL)?; +    /// let new_cap = v.capacity(); +    /// assert_eq!(new_cap, cap); +    /// +    /// # Ok::<(), Error>(()) +    /// ``` +    fn reserve(&mut self, additional: usize, flags: Flags) -> Result<(), AllocError>; +} + +impl<T> VecExt<T> for Vec<T> { +    fn with_capacity(capacity: usize, flags: Flags) -> Result<Self, AllocError> { +        let mut v = Vec::new(); +        <Self as VecExt<_>>::reserve(&mut v, capacity, flags)?; +        Ok(v) +    } + +    fn push(&mut self, v: T, flags: Flags) -> Result<(), AllocError> { +        <Self as VecExt<_>>::reserve(self, 1, flags)?; +        let s = self.spare_capacity_mut(); +        s[0].write(v); + +        // SAFETY: We just initialised the first spare entry, so it is safe to increase the length +        // by 1. We also know that the new length is <= capacity because of the previous call to +        // `reserve` above. 
+        unsafe { self.set_len(self.len() + 1) }; +        Ok(()) +    } + +    fn extend_from_slice(&mut self, other: &[T], flags: Flags) -> Result<(), AllocError> +    where +        T: Clone, +    { +        <Self as VecExt<_>>::reserve(self, other.len(), flags)?; +        for (slot, item) in core::iter::zip(self.spare_capacity_mut(), other) { +            slot.write(item.clone()); +        } + +        // SAFETY: We just initialised the `other.len()` spare entries, so it is safe to increase +        // the length by the same amount. We also know that the new length is <= capacity because +        // of the previous call to `reserve` above. +        unsafe { self.set_len(self.len() + other.len()) }; +        Ok(()) +    } + +    #[cfg(any(test, testlib))] +    fn reserve(&mut self, additional: usize, _flags: Flags) -> Result<(), AllocError> { +        Vec::reserve(self, additional); +        Ok(()) +    } + +    #[cfg(not(any(test, testlib)))] +    fn reserve(&mut self, additional: usize, flags: Flags) -> Result<(), AllocError> { +        let len = self.len(); +        let cap = self.capacity(); + +        if cap - len >= additional { +            return Ok(()); +        } + +        if core::mem::size_of::<T>() == 0 { +            // The capacity is already `usize::MAX` for SZTs, we can't go higher. +            return Err(AllocError); +        } + +        // We know cap is <= `isize::MAX` because `Layout::array` fails if the resulting byte size +        // is greater than `isize::MAX`. So the multiplication by two won't overflow. +        let new_cap = core::cmp::max(cap * 2, len.checked_add(additional).ok_or(AllocError)?); +        let layout = core::alloc::Layout::array::<T>(new_cap).map_err(|_| AllocError)?; + +        let (old_ptr, len, cap) = destructure(self); + +        // We need to make sure that `ptr` is either NULL or comes from a previous call to +        // `krealloc_aligned`. A `Vec<T>`'s `ptr` value is not guaranteed to be NULL and might be +        // dangling after being created with `Vec::new`. Instead, we can rely on `Vec<T>`'s capacity +        // to be zero if no memory has been allocated yet. +        let ptr = if cap == 0 { ptr::null_mut() } else { old_ptr }; + +        // SAFETY: `ptr` is valid because it's either NULL or comes from a previous call to +        // `krealloc_aligned`. We also verified that the type is not a ZST. +        let new_ptr = unsafe { super::allocator::krealloc_aligned(ptr.cast(), layout, flags) }; +        if new_ptr.is_null() { +            // SAFETY: We are just rebuilding the existing `Vec` with no changes. +            unsafe { rebuild(self, old_ptr, len, cap) }; +            Err(AllocError) +        } else { +            // SAFETY: `ptr` has been reallocated with the layout for `new_cap` elements. New cap +            // is greater than `cap`, so it continues to be >= `len`. +            unsafe { rebuild(self, new_ptr.cast::<T>(), len, new_cap) }; +            Ok(()) +        } +    } +} + +#[cfg(not(any(test, testlib)))] +fn destructure<T>(v: &mut Vec<T>) -> (*mut T, usize, usize) { +    let mut tmp = Vec::new(); +    core::mem::swap(&mut tmp, v); +    let mut tmp = core::mem::ManuallyDrop::new(tmp); +    let len = tmp.len(); +    let cap = tmp.capacity(); +    (tmp.as_mut_ptr(), len, cap) +} + +/// Rebuilds a `Vec` from a pointer, length, and capacity. +/// +/// # Safety +/// +/// The same as [`Vec::from_raw_parts`]. 
+#[cfg(not(any(test, testlib)))] +unsafe fn rebuild<T>(v: &mut Vec<T>, ptr: *mut T, len: usize, cap: usize) { +    // SAFETY: The safety requirements from this function satisfy those of `from_raw_parts`. +    let mut tmp = unsafe { Vec::from_raw_parts(ptr, len, cap) }; +    core::mem::swap(&mut tmp, v); +} diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs index 4786d3ee1e92..55280ae9fe40 100644 --- a/rust/kernel/error.rs +++ b/rust/kernel/error.rs @@ -4,14 +4,10 @@  //!  //! C header: [`include/uapi/asm-generic/errno-base.h`](srctree/include/uapi/asm-generic/errno-base.h) -use crate::str::CStr; +use crate::{alloc::AllocError, str::CStr}; -use alloc::{ -    alloc::{AllocError, LayoutError}, -    collections::TryReserveError, -}; +use alloc::alloc::LayoutError; -use core::convert::From;  use core::fmt;  use core::num::TryFromIntError;  use core::str::Utf8Error; @@ -192,12 +188,6 @@ impl From<Utf8Error> for Error {      }  } -impl From<TryReserveError> for Error { -    fn from(_: TryReserveError) -> Error { -        code::ENOMEM -    } -} -  impl From<LayoutError> for Error {      fn from(_: LayoutError) -> Error {          code::ENOMEM diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs index 424257284d16..68605b633e73 100644 --- a/rust/kernel/init.rs +++ b/rust/kernel/init.rs @@ -68,7 +68,7 @@  //! #     a <- new_mutex!(42, "Foo::a"),  //! #     b: 24,  //! # }); -//! let foo: Result<Pin<Box<Foo>>> = Box::pin_init(foo); +//! let foo: Result<Pin<Box<Foo>>> = Box::pin_init(foo, GFP_KERNEL);  //! ```  //!  //! For more information see the [`pin_init!`] macro. @@ -80,14 +80,15 @@  //!  //! ```rust  //! # use kernel::sync::{new_mutex, Arc, Mutex}; -//! let mtx: Result<Arc<Mutex<usize>>> = Arc::pin_init(new_mutex!(42, "example::mtx")); +//! let mtx: Result<Arc<Mutex<usize>>> = +//!     Arc::pin_init(new_mutex!(42, "example::mtx"), GFP_KERNEL);  //! ```  //!  //! To declare an init macro/function you just return an [`impl PinInit<T, E>`]:  //!  //! ```rust  //! # #![allow(clippy::disallowed_names)] -//! # use kernel::{sync::Mutex, prelude::*, new_mutex, init::PinInit, try_pin_init}; +//! # use kernel::{sync::Mutex, new_mutex, init::PinInit, try_pin_init};  //! #[pin_data]  //! struct DriverData {  //!     #[pin] @@ -99,7 +100,7 @@  //!     fn new() -> impl PinInit<Self, Error> {  //!         try_pin_init!(Self {  //!             status <- new_mutex!(0, "DriverData::status"), -//!             buffer: Box::init(kernel::init::zeroed())?, +//!             buffer: Box::init(kernel::init::zeroed(), GFP_KERNEL)?,  //!         })  //!     }  //! } @@ -121,7 +122,7 @@  //!  //! ```rust  //! # #![allow(unreachable_pub, clippy::disallowed_names)] -//! use kernel::{prelude::*, init, types::Opaque}; +//! use kernel::{init, types::Opaque};  //! use core::{ptr::addr_of_mut, marker::PhantomPinned, pin::Pin};  //! # mod bindings {  //! #     #![allow(non_camel_case_types)] @@ -210,13 +211,13 @@  //! [`pin_init!`]: crate::pin_init!  use crate::{ +    alloc::{box_ext::BoxExt, AllocError, Flags},      error::{self, Error},      sync::UniqueArc,      types::{Opaque, ScopeGuard},  };  use alloc::boxed::Box;  use core::{ -    alloc::AllocError,      cell::UnsafeCell,      convert::Infallible,      marker::PhantomData, @@ -305,9 +306,9 @@ macro_rules! 
stack_pin_init {  ///  /// stack_try_pin_init!(let foo: Result<Pin<&mut Foo>, AllocError> = pin_init!(Foo {  ///     a <- new_mutex!(42), -///     b: Box::try_new(Bar { +///     b: Box::new(Bar {  ///         x: 64, -///     })?, +///     }, GFP_KERNEL)?,  /// }));  /// let foo = foo.unwrap();  /// pr_info!("a: {}", &*foo.a.lock()); @@ -331,9 +332,9 @@ macro_rules! stack_pin_init {  ///  /// stack_try_pin_init!(let foo: Pin<&mut Foo> =? pin_init!(Foo {  ///     a <- new_mutex!(42), -///     b: Box::try_new(Bar { +///     b: Box::new(Bar {  ///         x: 64, -///     })?, +///     }, GFP_KERNEL)?,  /// }));  /// pr_info!("a: {}", &*foo.a.lock());  /// # Ok::<_, AllocError>(()) @@ -390,7 +391,7 @@ macro_rules! stack_try_pin_init {  ///     },  /// });  /// # initializer } -/// # Box::pin_init(demo()).unwrap(); +/// # Box::pin_init(demo(), GFP_KERNEL).unwrap();  /// ```  ///  /// Arbitrary Rust expressions can be used to set the value of a variable. @@ -412,7 +413,7 @@ macro_rules! stack_try_pin_init {  ///  /// ```rust  /// # #![allow(clippy::disallowed_names)] -/// # use kernel::{init, pin_init, prelude::*, init::*}; +/// # use kernel::{init, pin_init, init::*};  /// # use core::pin::Pin;  /// # #[pin_data]  /// # struct Foo { @@ -460,7 +461,7 @@ macro_rules! stack_try_pin_init {  /// #         })  /// #     }  /// # } -/// let foo = Box::pin_init(Foo::new()); +/// let foo = Box::pin_init(Foo::new(), GFP_KERNEL);  /// ```  ///  /// They can also easily embed it into their own `struct`s: @@ -600,7 +601,7 @@ macro_rules! pin_init {  /// impl BigBuf {  ///     fn new() -> impl PinInit<Self, Error> {  ///         try_pin_init!(Self { -///             big: Box::init(init::zeroed())?, +///             big: Box::init(init::zeroed(), GFP_KERNEL)?,  ///             small: [0; 1024 * 1024],  ///             ptr: core::ptr::null_mut(),  ///         }? Error) @@ -701,7 +702,7 @@ macro_rules! init {  /// impl BigBuf {  ///     fn new() -> impl Init<Self, Error> {  ///         try_init!(Self { -///             big: Box::init(zeroed())?, +///             big: Box::init(zeroed(), GFP_KERNEL)?,  ///             small: [0; 1024 * 1024],  ///         }? Error)  ///     } @@ -1013,7 +1014,7 @@ pub fn uninit<T, E>() -> impl Init<MaybeUninit<T>, E> {  ///  /// ```rust  /// use kernel::{error::Error, init::init_array_from_fn}; -/// let array: Box<[usize; 1_000]> = Box::init::<Error>(init_array_from_fn(|i| i)).unwrap(); +/// let array: Box<[usize; 1_000]> = Box::init::<Error>(init_array_from_fn(|i| i), GFP_KERNEL).unwrap();  /// assert_eq!(array.len(), 1_000);  /// ```  pub fn init_array_from_fn<I, const N: usize, T, E>( @@ -1057,7 +1058,7 @@ where  /// ```rust  /// use kernel::{sync::{Arc, Mutex}, init::pin_init_array_from_fn, new_mutex};  /// let array: Arc<[Mutex<usize>; 1_000]> = -///     Arc::pin_init(pin_init_array_from_fn(|i| new_mutex!(i))).unwrap(); +///     Arc::pin_init(pin_init_array_from_fn(|i| new_mutex!(i)), GFP_KERNEL).unwrap();  /// assert_eq!(array.len(), 1_000);  /// ```  pub fn pin_init_array_from_fn<I, const N: usize, T, E>( @@ -1115,7 +1116,7 @@ pub trait InPlaceInit<T>: Sized {      /// type.      ///      /// If `T: !Unpin` it will not be able to move afterwards. -    fn try_pin_init<E>(init: impl PinInit<T, E>) -> Result<Pin<Self>, E> +    fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E>      where          E: From<AllocError>; @@ -1123,7 +1124,7 @@ pub trait InPlaceInit<T>: Sized {      /// type.      
///      /// If `T: !Unpin` it will not be able to move afterwards. -    fn pin_init<E>(init: impl PinInit<T, E>) -> error::Result<Pin<Self>> +    fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> error::Result<Pin<Self>>      where          Error: From<E>,      { @@ -1131,16 +1132,16 @@ pub trait InPlaceInit<T>: Sized {          let init = unsafe {              pin_init_from_closure(|slot| init.__pinned_init(slot).map_err(|e| Error::from(e)))          }; -        Self::try_pin_init(init) +        Self::try_pin_init(init, flags)      }      /// Use the given initializer to in-place initialize a `T`. -    fn try_init<E>(init: impl Init<T, E>) -> Result<Self, E> +    fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>      where          E: From<AllocError>;      /// Use the given initializer to in-place initialize a `T`. -    fn init<E>(init: impl Init<T, E>) -> error::Result<Self> +    fn init<E>(init: impl Init<T, E>, flags: Flags) -> error::Result<Self>      where          Error: From<E>,      { @@ -1148,17 +1149,17 @@ pub trait InPlaceInit<T>: Sized {          let init = unsafe {              init_from_closure(|slot| init.__pinned_init(slot).map_err(|e| Error::from(e)))          }; -        Self::try_init(init) +        Self::try_init(init, flags)      }  }  impl<T> InPlaceInit<T> for Box<T> {      #[inline] -    fn try_pin_init<E>(init: impl PinInit<T, E>) -> Result<Pin<Self>, E> +    fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E>      where          E: From<AllocError>,      { -        let mut this = Box::try_new_uninit()?; +        let mut this = <Box<_> as BoxExt<_>>::new_uninit(flags)?;          let slot = this.as_mut_ptr();          // SAFETY: When init errors/panics, slot will get deallocated but not dropped,          // slot is valid and will not be moved, because we pin it later. @@ -1168,11 +1169,11 @@ impl<T> InPlaceInit<T> for Box<T> {      }      #[inline] -    fn try_init<E>(init: impl Init<T, E>) -> Result<Self, E> +    fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>      where          E: From<AllocError>,      { -        let mut this = Box::try_new_uninit()?; +        let mut this = <Box<_> as BoxExt<_>>::new_uninit(flags)?;          let slot = this.as_mut_ptr();          // SAFETY: When init errors/panics, slot will get deallocated but not dropped,          // slot is valid. @@ -1184,11 +1185,11 @@ impl<T> InPlaceInit<T> for Box<T> {  impl<T> InPlaceInit<T> for UniqueArc<T> {      #[inline] -    fn try_pin_init<E>(init: impl PinInit<T, E>) -> Result<Pin<Self>, E> +    fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E>      where          E: From<AllocError>,      { -        let mut this = UniqueArc::try_new_uninit()?; +        let mut this = UniqueArc::new_uninit(flags)?;          let slot = this.as_mut_ptr();          // SAFETY: When init errors/panics, slot will get deallocated but not dropped,          // slot is valid and will not be moved, because we pin it later. 
@@ -1198,11 +1199,11 @@ impl<T> InPlaceInit<T> for UniqueArc<T> {      }      #[inline] -    fn try_init<E>(init: impl Init<T, E>) -> Result<Self, E> +    fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>      where          E: From<AllocError>,      { -        let mut this = UniqueArc::try_new_uninit()?; +        let mut this = UniqueArc::new_uninit(flags)?;          let slot = this.as_mut_ptr();          // SAFETY: When init errors/panics, slot will get deallocated but not dropped,          // slot is valid. @@ -1292,8 +1293,15 @@ impl_zeroable! {      i8, i16, i32, i64, i128, isize,      f32, f64, -    // SAFETY: These are ZSTs, there is nothing to zero. -    {<T: ?Sized>} PhantomData<T>, core::marker::PhantomPinned, Infallible, (), +    // Note: do not add uninhabited types (such as `!` or `core::convert::Infallible`) to this list; +    // creating an instance of an uninhabited type is immediate undefined behavior. For more on +    // uninhabited/empty types, consult The Rustonomicon: +    // <https://doc.rust-lang.org/stable/nomicon/exotic-sizes.html#empty-types>. The Rust Reference +    // also has information on undefined behavior: +    // <https://doc.rust-lang.org/stable/reference/behavior-considered-undefined.html>. +    // +    // SAFETY: These are inhabited ZSTs; there is nothing to zero and a valid value exists. +    {<T: ?Sized>} PhantomData<T>, core::marker::PhantomPinned, (),      // SAFETY: Type is allowed to take any value, including all zeros.      {<T>} MaybeUninit<T>, diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs index cb6e61b6c50b..02ecedc4ae7a 100644 --- a/rust/kernel/init/macros.rs +++ b/rust/kernel/init/macros.rs @@ -250,7 +250,7 @@  //!                     // error type is `Infallible`) we will need to drop this field if there  //!                     // is an error later. This `DropGuard` will drop the field when it gets  //!                     // dropped and has not yet been forgotten. -//!                     let t = unsafe { +//!                     let __t_guard = unsafe {  //!                         ::pinned_init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).t))  //!                     };  //!                     // Expansion of `x: 0,`: @@ -261,14 +261,14 @@  //!                         unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).x), x) };  //!                     }  //!                     // We again create a `DropGuard`. -//!                     let x = unsafe { +//!                     let __x_guard = unsafe {  //!                         ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).x))  //!                     };  //!                     // Since initialization has successfully completed, we can now forget  //!                     // the guards. This is not `mem::forget`, since we only have  //!                     // `&DropGuard`. -//!                     ::core::mem::forget(x); -//!                     ::core::mem::forget(t); +//!                     ::core::mem::forget(__x_guard); +//!                     ::core::mem::forget(__t_guard);  //!                     // Here we use the type checker to ensure that every field has been  //!                     // initialized exactly once, since this is `if false` it will never get  //!                     // executed, but still type-checked. @@ -461,16 +461,16 @@  //!             {  //!                 unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).a), a) };  //!             } -//!             
let a = unsafe { +//!             let __a_guard = unsafe {  //!                 ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).a))  //!             };  //!             let init = Bar::new(36);  //!             unsafe { data.b(::core::addr_of_mut!((*slot).b), b)? }; -//!             let b = unsafe { +//!             let __b_guard = unsafe {  //!                 ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).b))  //!             }; -//!             ::core::mem::forget(b); -//!             ::core::mem::forget(a); +//!             ::core::mem::forget(__b_guard); +//!             ::core::mem::forget(__a_guard);  //!             #[allow(unreachable_code, clippy::diverging_sub_expression)]  //!             let _ = || {  //!                 unsafe { @@ -538,6 +538,7 @@ macro_rules! __pin_data {          ),          @impl_generics($($impl_generics:tt)*),          @ty_generics($($ty_generics:tt)*), +        @decl_generics($($decl_generics:tt)*),          @body({ $($fields:tt)* }),      ) => {          // We now use token munching to iterate through all of the fields. While doing this we @@ -560,6 +561,9 @@ macro_rules! __pin_data {              @impl_generics($($impl_generics)*),              // The 'ty generics', the generics that will need to be specified on the impl blocks.              @ty_generics($($ty_generics)*), +            // The 'decl generics', the generics that need to be specified on the struct +            // definition. +            @decl_generics($($decl_generics)*),              // The where clause of any impl block and the declaration.              @where($($($whr)*)?),              // The remaining fields tokens that need to be processed. @@ -585,6 +589,7 @@ macro_rules! __pin_data {          @name($name:ident),          @impl_generics($($impl_generics:tt)*),          @ty_generics($($ty_generics:tt)*), +        @decl_generics($($decl_generics:tt)*),          @where($($whr:tt)*),          // We found a PhantomPinned field, this should generally be pinned!          @fields_munch($field:ident : $($($(::)?core::)?marker::)?PhantomPinned, $($rest:tt)*), @@ -607,6 +612,7 @@ macro_rules! __pin_data {              @name($name),              @impl_generics($($impl_generics)*),              @ty_generics($($ty_generics)*), +            @decl_generics($($decl_generics)*),              @where($($whr)*),              @fields_munch($($rest)*),              @pinned($($pinned)* $($accum)* $field: ::core::marker::PhantomPinned,), @@ -623,6 +629,7 @@ macro_rules! __pin_data {          @name($name:ident),          @impl_generics($($impl_generics:tt)*),          @ty_generics($($ty_generics:tt)*), +        @decl_generics($($decl_generics:tt)*),          @where($($whr:tt)*),          // We reached the field declaration.          @fields_munch($field:ident : $type:ty, $($rest:tt)*), @@ -640,6 +647,7 @@ macro_rules! __pin_data {              @name($name),              @impl_generics($($impl_generics)*),              @ty_generics($($ty_generics)*), +            @decl_generics($($decl_generics)*),              @where($($whr)*),              @fields_munch($($rest)*),              @pinned($($pinned)* $($accum)* $field: $type,), @@ -656,6 +664,7 @@ macro_rules! __pin_data {          @name($name:ident),          @impl_generics($($impl_generics:tt)*),          @ty_generics($($ty_generics:tt)*), +        @decl_generics($($decl_generics:tt)*),          @where($($whr:tt)*),          // We reached the field declaration.          
@fields_munch($field:ident : $type:ty, $($rest:tt)*), @@ -673,6 +682,7 @@ macro_rules! __pin_data {              @name($name),              @impl_generics($($impl_generics)*),              @ty_generics($($ty_generics)*), +            @decl_generics($($decl_generics)*),              @where($($whr)*),              @fields_munch($($rest)*),              @pinned($($pinned)*), @@ -689,6 +699,7 @@ macro_rules! __pin_data {          @name($name:ident),          @impl_generics($($impl_generics:tt)*),          @ty_generics($($ty_generics:tt)*), +        @decl_generics($($decl_generics:tt)*),          @where($($whr:tt)*),          // We found the `#[pin]` attr.          @fields_munch(#[pin] $($rest:tt)*), @@ -705,6 +716,7 @@ macro_rules! __pin_data {              @name($name),              @impl_generics($($impl_generics)*),              @ty_generics($($ty_generics)*), +            @decl_generics($($decl_generics)*),              @where($($whr)*),              @fields_munch($($rest)*),              // We do not include `#[pin]` in the list of attributes, since it is not actually an @@ -724,6 +736,7 @@ macro_rules! __pin_data {          @name($name:ident),          @impl_generics($($impl_generics:tt)*),          @ty_generics($($ty_generics:tt)*), +        @decl_generics($($decl_generics:tt)*),          @where($($whr:tt)*),          // We reached the field declaration with visibility, for simplicity we only munch the          // visibility and put it into `$accum`. @@ -741,6 +754,7 @@ macro_rules! __pin_data {              @name($name),              @impl_generics($($impl_generics)*),              @ty_generics($($ty_generics)*), +            @decl_generics($($decl_generics)*),              @where($($whr)*),              @fields_munch($field $($rest)*),              @pinned($($pinned)*), @@ -757,6 +771,7 @@ macro_rules! __pin_data {          @name($name:ident),          @impl_generics($($impl_generics:tt)*),          @ty_generics($($ty_generics:tt)*), +        @decl_generics($($decl_generics:tt)*),          @where($($whr:tt)*),          // Some other attribute, just put it into `$accum`.          @fields_munch(#[$($attr:tt)*] $($rest:tt)*), @@ -773,6 +788,7 @@ macro_rules! __pin_data {              @name($name),              @impl_generics($($impl_generics)*),              @ty_generics($($ty_generics)*), +            @decl_generics($($decl_generics)*),              @where($($whr)*),              @fields_munch($($rest)*),              @pinned($($pinned)*), @@ -789,6 +805,7 @@ macro_rules! __pin_data {          @name($name:ident),          @impl_generics($($impl_generics:tt)*),          @ty_generics($($ty_generics:tt)*), +        @decl_generics($($decl_generics:tt)*),          @where($($whr:tt)*),          // We reached the end of the fields, plus an optional additional comma, since we added one          // before and the user is also allowed to put a trailing comma. @@ -802,7 +819,7 @@ macro_rules! __pin_data {      ) => {          // Declare the struct with all fields in the correct order.          $($struct_attrs)* -        $vis struct $name <$($impl_generics)*> +        $vis struct $name <$($decl_generics)*>          where $($whr)*          {              $($fields)* @@ -1192,14 +1209,14 @@ macro_rules! __init_internal {          // We use `paste!` to create new hygiene for `$field`.          ::kernel::macros::paste! {              // SAFETY: We forget the guard later when initialization has succeeded. 
-            let [<$field>] = unsafe { +            let [< __ $field _guard >] = unsafe {                  $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))              };              $crate::__init_internal!(init_slot($use_data):                  @data($data),                  @slot($slot), -                @guards([<$field>], $($guards,)*), +                @guards([< __ $field _guard >], $($guards,)*),                  @munch_fields($($rest)*),              );          } @@ -1223,14 +1240,14 @@ macro_rules! __init_internal {          // We use `paste!` to create new hygiene for `$field`.          ::kernel::macros::paste! {              // SAFETY: We forget the guard later when initialization has succeeded. -            let [<$field>] = unsafe { +            let [< __ $field _guard >] = unsafe {                  $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))              };              $crate::__init_internal!(init_slot():                  @data($data),                  @slot($slot), -                @guards([<$field>], $($guards,)*), +                @guards([< __ $field _guard >], $($guards,)*),                  @munch_fields($($rest)*),              );          } @@ -1255,14 +1272,14 @@ macro_rules! __init_internal {          // We use `paste!` to create new hygiene for `$field`.          ::kernel::macros::paste! {              // SAFETY: We forget the guard later when initialization has succeeded. -            let [<$field>] = unsafe { +            let [< __ $field _guard >] = unsafe {                  $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))              };              $crate::__init_internal!(init_slot($($use_data)?):                  @data($data),                  @slot($slot), -                @guards([<$field>], $($guards,)*), +                @guards([< __ $field _guard >], $($guards,)*),                  @munch_fields($($rest)*),              );          } diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index be68d5e567b1..fbd91a48ff8b 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -12,11 +12,9 @@  //! do so first instead of bypassing this crate.  #![no_std] -#![feature(allocator_api)]  #![feature(coerce_unsized)]  #![feature(dispatch_from_dyn)]  #![feature(new_uninit)] -#![feature(offset_of)]  #![feature(receiver_trait)]  #![feature(unsize)] @@ -28,9 +26,7 @@ compile_error!("Missing kernel configuration for conditional compilation");  // Allow proc-macros to refer to `::kernel` inside the `kernel` crate (this crate).  extern crate self as kernel; -#[cfg(not(test))] -#[cfg(not(testlib))] -mod allocator; +pub mod alloc;  mod build_assert;  pub mod error;  pub mod init; @@ -65,7 +61,7 @@ const __LOG_PREFIX: &[u8] = b"rust_kernel\0";  /// The top level entrypoint to implementing a kernel module.  ///  /// For any teardown or cleanup operations, your type may implement [`Drop`]. -pub trait Module: Sized + Sync { +pub trait Module: Sized + Sync + Send {      /// Called at module initialization time.      ///      /// Use this method to perform whatever setup or registration your module @@ -92,6 +88,13 @@ impl ThisModule {      pub const unsafe fn from_ptr(ptr: *mut bindings::module) -> ThisModule {          ThisModule(ptr)      } + +    /// Access the raw pointer for this module. +    /// +    /// It is up to the user to use it correctly. 
+    pub const fn as_ptr(&self) -> *mut bindings::module { +        self.0 +    }  }  #[cfg(not(any(testlib, test)))] diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs index 96e09c6e8530..fd40b703d224 100644 --- a/rust/kernel/net/phy.rs +++ b/rust/kernel/net/phy.rs @@ -6,7 +6,7 @@  //!  //! C headers: [`include/linux/phy.h`](srctree/include/linux/phy.h). -use crate::{bindings, error::*, prelude::*, str::CStr, types::Opaque}; +use crate::{error::*, prelude::*, types::Opaque};  use core::marker::PhantomData; @@ -640,6 +640,10 @@ pub struct Registration {      drivers: Pin<&'static mut [DriverVTable]>,  } +// SAFETY: The only action allowed in a `Registration` instance is dropping it, which is safe to do +// from any thread because `phy_drivers_unregister` can be called from any thread context. +unsafe impl Send for Registration {} +  impl Registration {      /// Registers a PHY driver.      pub fn register( diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs index ae21600970b3..b37a0b3180fb 100644 --- a/rust/kernel/prelude.rs +++ b/rust/kernel/prelude.rs @@ -14,6 +14,8 @@  #[doc(no_inline)]  pub use core::pin::Pin; +pub use crate::alloc::{box_ext::BoxExt, flags::*, vec_ext::VecExt}; +  #[doc(no_inline)]  pub use alloc::{boxed::Box, vec::Vec}; diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs index 9b13aca832c2..a78aa3514a0a 100644 --- a/rust/kernel/print.rs +++ b/rust/kernel/print.rs @@ -13,9 +13,6 @@ use core::{  use crate::str::RawFormatter; -#[cfg(CONFIG_PRINTK)] -use crate::bindings; -  // Called from `vsprintf` with format specifier `%pA`.  #[no_mangle]  unsafe extern "C" fn rust_fmt_argument( @@ -35,8 +32,6 @@ unsafe extern "C" fn rust_fmt_argument(  /// Public but hidden since it should only be used from public macros.  #[doc(hidden)]  pub mod format_strings { -    use crate::bindings; -      /// The length we copy from the `KERN_*` kernel prefixes.      const LENGTH_PREFIX: usize = 2; diff --git a/rust/kernel/std_vendor.rs b/rust/kernel/std_vendor.rs index 388d6a5147a2..39679a960c1a 100644 --- a/rust/kernel/std_vendor.rs +++ b/rust/kernel/std_vendor.rs @@ -146,15 +146,16 @@ macro_rules! dbg {      // `$val` expression could be a block (`{ .. }`), in which case the `pr_info!`      // will be malformed.      () => { -        $crate::pr_info!("[{}:{}]\n", ::core::file!(), ::core::line!()) +        $crate::pr_info!("[{}:{}:{}]\n", ::core::file!(), ::core::line!(), ::core::column!())      };      ($val:expr $(,)?) => {          // Use of `match` here is intentional because it affects the lifetimes          // of temporaries - https://stackoverflow.com/a/48732525/1063961          match $val {              tmp => { -                $crate::pr_info!("[{}:{}] {} = {:#?}\n", -                    ::core::file!(), ::core::line!(), ::core::stringify!($val), &tmp); +                $crate::pr_info!("[{}:{}:{}] {} = {:#?}\n", +                    ::core::file!(), ::core::line!(), ::core::column!(), +                    ::core::stringify!($val), &tmp);                  tmp              }          } diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs index 925ced8fdc61..bb8d4f41475b 100644 --- a/rust/kernel/str.rs +++ b/rust/kernel/str.rs @@ -2,15 +2,12 @@  //! String representations. 
-use alloc::alloc::AllocError; +use crate::alloc::{flags::*, vec_ext::VecExt, AllocError};  use alloc::vec::Vec;  use core::fmt::{self, Write}; -use core::ops::{self, Deref, Index}; +use core::ops::{self, Deref, DerefMut, Index}; -use crate::{ -    bindings, -    error::{code::*, Error}, -}; +use crate::error::{code::*, Error};  /// Byte string without UTF-8 validity guarantee.  #[repr(transparent)] @@ -236,6 +233,19 @@ impl CStr {          unsafe { core::mem::transmute(bytes) }      } +    /// Creates a mutable [`CStr`] from a `[u8]` without performing any +    /// additional checks. +    /// +    /// # Safety +    /// +    /// `bytes` *must* end with a `NUL` byte, and should only have a single +    /// `NUL` byte (or the string will be truncated). +    #[inline] +    pub unsafe fn from_bytes_with_nul_unchecked_mut(bytes: &mut [u8]) -> &mut CStr { +        // SAFETY: Properties of `bytes` guaranteed by the safety precondition. +        unsafe { &mut *(bytes as *mut [u8] as *mut CStr) } +    } +      /// Returns a C pointer to the string.      #[inline]      pub const fn as_char_ptr(&self) -> *const core::ffi::c_char { @@ -299,6 +309,70 @@ impl CStr {      pub fn to_cstring(&self) -> Result<CString, AllocError> {          CString::try_from(self)      } + +    /// Converts this [`CStr`] to its ASCII lower case equivalent in-place. +    /// +    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z', +    /// but non-ASCII letters are unchanged. +    /// +    /// To return a new lowercased value without modifying the existing one, use +    /// [`to_ascii_lowercase()`]. +    /// +    /// [`to_ascii_lowercase()`]: #method.to_ascii_lowercase +    pub fn make_ascii_lowercase(&mut self) { +        // INVARIANT: This doesn't introduce or remove NUL bytes in the C +        // string. +        self.0.make_ascii_lowercase(); +    } + +    /// Converts this [`CStr`] to its ASCII upper case equivalent in-place. +    /// +    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z', +    /// but non-ASCII letters are unchanged. +    /// +    /// To return a new uppercased value without modifying the existing one, use +    /// [`to_ascii_uppercase()`]. +    /// +    /// [`to_ascii_uppercase()`]: #method.to_ascii_uppercase +    pub fn make_ascii_uppercase(&mut self) { +        // INVARIANT: This doesn't introduce or remove NUL bytes in the C +        // string. +        self.0.make_ascii_uppercase(); +    } + +    /// Returns a copy of this [`CString`] where each character is mapped to its +    /// ASCII lower case equivalent. +    /// +    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z', +    /// but non-ASCII letters are unchanged. +    /// +    /// To lowercase the value in-place, use [`make_ascii_lowercase`]. +    /// +    /// [`make_ascii_lowercase`]: str::make_ascii_lowercase +    pub fn to_ascii_lowercase(&self) -> Result<CString, AllocError> { +        let mut s = self.to_cstring()?; + +        s.make_ascii_lowercase(); + +        Ok(s) +    } + +    /// Returns a copy of this [`CString`] where each character is mapped to its +    /// ASCII upper case equivalent. +    /// +    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z', +    /// but non-ASCII letters are unchanged. +    /// +    /// To uppercase the value in-place, use [`make_ascii_uppercase`]. 
+    /// +    /// [`make_ascii_uppercase`]: str::make_ascii_uppercase +    pub fn to_ascii_uppercase(&self) -> Result<CString, AllocError> { +        let mut s = self.to_cstring()?; + +        s.make_ascii_uppercase(); + +        Ok(s) +    }  }  impl fmt::Display for CStr { @@ -729,7 +803,7 @@ impl CString {          let size = f.bytes_written();          // Allocate a vector with the required number of bytes, and write to it. -        let mut buf = Vec::try_with_capacity(size)?; +        let mut buf = <Vec<_> as VecExt<_>>::with_capacity(size, GFP_KERNEL)?;          // SAFETY: The buffer stored in `buf` is at least of size `size` and is valid for writes.          let mut f = unsafe { Formatter::from_buffer(buf.as_mut_ptr(), size) };          f.write_fmt(args)?; @@ -764,13 +838,21 @@ impl Deref for CString {      }  } +impl DerefMut for CString { +    fn deref_mut(&mut self) -> &mut Self::Target { +        // SAFETY: A `CString` is always NUL-terminated and contains no other +        // NUL bytes. +        unsafe { CStr::from_bytes_with_nul_unchecked_mut(self.buf.as_mut_slice()) } +    } +} +  impl<'a> TryFrom<&'a CStr> for CString {      type Error = AllocError;      fn try_from(cstr: &'a CStr) -> Result<CString, AllocError> {          let mut buf = Vec::new(); -        buf.try_extend_from_slice(cstr.as_bytes_with_nul()) +        <Vec<_> as VecExt<_>>::extend_from_slice(&mut buf, cstr.as_bytes_with_nul(), GFP_KERNEL)              .map_err(|_| AllocError)?;          // INVARIANT: The `CStr` and `CString` types have the same invariants for diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs index c983f63fd56e..0ab20975a3b5 100644 --- a/rust/kernel/sync.rs +++ b/rust/kernel/sync.rs @@ -37,6 +37,12 @@ impl LockClassKey {      }  } +impl Default for LockClassKey { +    fn default() -> Self { +        Self::new() +    } +} +  /// Defines a new static lock class and returns a pointer to it.  #[doc(hidden)]  #[macro_export] diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs index 7d4c4bf58388..3673496c2363 100644 --- a/rust/kernel/sync/arc.rs +++ b/rust/kernel/sync/arc.rs @@ -16,7 +16,7 @@  //! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html  use crate::{ -    bindings, +    alloc::{box_ext::BoxExt, AllocError, Flags},      error::{self, Error},      init::{self, InPlaceInit, Init, PinInit},      try_init, @@ -24,7 +24,7 @@ use crate::{  };  use alloc::boxed::Box;  use core::{ -    alloc::{AllocError, Layout}, +    alloc::Layout,      fmt,      marker::{PhantomData, Unsize},      mem::{ManuallyDrop, MaybeUninit}, @@ -57,7 +57,7 @@ mod std_vendor;  /// }  ///  /// // Create a refcounted instance of `Example`. -/// let obj = Arc::try_new(Example { a: 10, b: 20 })?; +/// let obj = Arc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?;  ///  /// // Get a new pointer to `obj` and increment the refcount.  /// let cloned = obj.clone(); @@ -96,7 +96,7 @@ mod std_vendor;  ///     }  /// }  /// -/// let obj = Arc::try_new(Example { a: 10, b: 20 })?; +/// let obj = Arc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?;  /// obj.use_reference();  /// obj.take_over();  /// # Ok::<(), Error>(()) @@ -119,7 +119,7 @@ mod std_vendor;  /// impl MyTrait for Example {}  ///  /// // `obj` has type `Arc<Example>`. -/// let obj: Arc<Example> = Arc::try_new(Example)?; +/// let obj: Arc<Example> = Arc::new(Example, GFP_KERNEL)?;  ///  /// // `coerced` has type `Arc<dyn MyTrait>`.  
/// let coerced: Arc<dyn MyTrait> = obj; @@ -137,6 +137,39 @@ struct ArcInner<T: ?Sized> {      data: T,  } +impl<T: ?Sized> ArcInner<T> { +    /// Converts a pointer to the contents of an [`Arc`] into a pointer to the [`ArcInner`]. +    /// +    /// # Safety +    /// +    /// `ptr` must have been returned by a previous call to [`Arc::into_raw`], and the `Arc` must +    /// not yet have been destroyed. +    unsafe fn container_of(ptr: *const T) -> NonNull<ArcInner<T>> { +        let refcount_layout = Layout::new::<bindings::refcount_t>(); +        // SAFETY: The caller guarantees that the pointer is valid. +        let val_layout = Layout::for_value(unsafe { &*ptr }); +        // SAFETY: We're computing the layout of a real struct that existed when compiling this +        // binary, so its layout is not so large that it can trigger arithmetic overflow. +        let val_offset = unsafe { refcount_layout.extend(val_layout).unwrap_unchecked().1 }; + +        // Pointer casts leave the metadata unchanged. This is okay because the metadata of `T` and +        // `ArcInner<T>` is the same since `ArcInner` is a struct with `T` as its last field. +        // +        // This is documented at: +        // <https://doc.rust-lang.org/std/ptr/trait.Pointee.html>. +        let ptr = ptr as *const ArcInner<T>; + +        // SAFETY: The pointer is in-bounds of an allocation both before and after offsetting the +        // pointer, since it originates from a previous call to `Arc::into_raw` on an `Arc` that is +        // still valid. +        let ptr = unsafe { ptr.byte_sub(val_offset) }; + +        // SAFETY: The pointer can't be null since you can't have an `ArcInner<T>` value at the null +        // address. +        unsafe { NonNull::new_unchecked(ptr.cast_mut()) } +    } +} +  // This is to allow [`Arc`] (and variants) to be used as the type of `self`.  impl<T: ?Sized> core::ops::Receiver for Arc<T> {} @@ -162,7 +195,7 @@ unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}  impl<T> Arc<T> {      /// Constructs a new reference counted instance of `T`. -    pub fn try_new(contents: T) -> Result<Self, AllocError> { +    pub fn new(contents: T, flags: Flags) -> Result<Self, AllocError> {          // INVARIANT: The refcount is initialised to a non-zero value.          let value = ArcInner {              // SAFETY: There are no safety requirements for this FFI call. @@ -170,7 +203,7 @@ impl<T> Arc<T> {              data: contents,          }; -        let inner = Box::try_new(value)?; +        let inner = <Box<_> as BoxExt<_>>::new(value, flags)?;          // SAFETY: We just created `inner` with a reference count of 1, which is owned by the new          // `Arc` object. @@ -181,22 +214,22 @@ impl<T> Arc<T> {      ///      /// If `T: !Unpin` it will not be able to move afterwards.      #[inline] -    pub fn pin_init<E>(init: impl PinInit<T, E>) -> error::Result<Self> +    pub fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> error::Result<Self>      where          Error: From<E>,      { -        UniqueArc::pin_init(init).map(|u| u.into()) +        UniqueArc::pin_init(init, flags).map(|u| u.into())      }      /// Use the given initializer to in-place initialize a `T`.      ///      /// This is equivalent to [`Arc<T>::pin_init`], since an [`Arc`] is always pinned.      
#[inline] -    pub fn init<E>(init: impl Init<T, E>) -> error::Result<Self> +    pub fn init<E>(init: impl Init<T, E>, flags: Flags) -> error::Result<Self>      where          Error: From<E>,      { -        UniqueArc::init(init).map(|u| u.into()) +        UniqueArc::init(init, flags).map(|u| u.into())      }  } @@ -232,27 +265,13 @@ impl<T: ?Sized> Arc<T> {      /// `ptr` must have been returned by a previous call to [`Arc::into_raw`]. Additionally, it      /// must not be called more than once for each previous call to [`Arc::into_raw`].      pub unsafe fn from_raw(ptr: *const T) -> Self { -        let refcount_layout = Layout::new::<bindings::refcount_t>(); -        // SAFETY: The caller guarantees that the pointer is valid. -        let val_layout = Layout::for_value(unsafe { &*ptr }); -        // SAFETY: We're computing the layout of a real struct that existed when compiling this -        // binary, so its layout is not so large that it can trigger arithmetic overflow. -        let val_offset = unsafe { refcount_layout.extend(val_layout).unwrap_unchecked().1 }; - -        // Pointer casts leave the metadata unchanged. This is okay because the metadata of `T` and -        // `ArcInner<T>` is the same since `ArcInner` is a struct with `T` as its last field. -        // -        // This is documented at: -        // <https://doc.rust-lang.org/std/ptr/trait.Pointee.html>. -        let ptr = ptr as *const ArcInner<T>; - -        // SAFETY: The pointer is in-bounds of an allocation both before and after offsetting the -        // pointer, since it originates from a previous call to `Arc::into_raw` and is still valid. -        let ptr = unsafe { ptr.byte_sub(val_offset) }; +        // SAFETY: The caller promises that this pointer originates from a call to `into_raw` on an +        // `Arc` that is still valid. +        let ptr = unsafe { ArcInner::container_of(ptr) };          // SAFETY: By the safety requirements we know that `ptr` came from `Arc::into_raw`, so the          // reference count held then will be owned by the new `Arc` object. -        unsafe { Self::from_inner(NonNull::new_unchecked(ptr.cast_mut())) } +        unsafe { Self::from_inner(ptr) }      }      /// Returns an [`ArcBorrow`] from the given [`Arc`]. @@ -271,6 +290,68 @@ impl<T: ?Sized> Arc<T> {      pub fn ptr_eq(this: &Self, other: &Self) -> bool {          core::ptr::eq(this.ptr.as_ptr(), other.ptr.as_ptr())      } + +    /// Converts this [`Arc`] into a [`UniqueArc`], or destroys it if it is not unique. +    /// +    /// When this destroys the `Arc`, it does so while properly avoiding races. This means that +    /// this method will never call the destructor of the value. +    /// +    /// # Examples +    /// +    /// ``` +    /// use kernel::sync::{Arc, UniqueArc}; +    /// +    /// let arc = Arc::new(42, GFP_KERNEL)?; +    /// let unique_arc = arc.into_unique_or_drop(); +    /// +    /// // The above conversion should succeed since refcount of `arc` is 1. +    /// assert!(unique_arc.is_some()); +    /// +    /// assert_eq!(*(unique_arc.unwrap()), 42); +    /// +    /// # Ok::<(), Error>(()) +    /// ``` +    /// +    /// ``` +    /// use kernel::sync::{Arc, UniqueArc}; +    /// +    /// let arc = Arc::new(42, GFP_KERNEL)?; +    /// let another = arc.clone(); +    /// +    /// let unique_arc = arc.into_unique_or_drop(); +    /// +    /// // The above conversion should fail since refcount of `arc` is >1. 
+    /// assert!(unique_arc.is_none()); +    /// +    /// # Ok::<(), Error>(()) +    /// ``` +    pub fn into_unique_or_drop(self) -> Option<Pin<UniqueArc<T>>> { +        // We will manually manage the refcount in this method, so we disable the destructor. +        let me = ManuallyDrop::new(self); +        // SAFETY: We own a refcount, so the pointer is still valid. +        let refcount = unsafe { me.ptr.as_ref() }.refcount.get(); + +        // If the refcount reaches a non-zero value, then we have destroyed this `Arc` and will +        // return without further touching the `Arc`. If the refcount reaches zero, then there are +        // no other arcs, and we can create a `UniqueArc`. +        // +        // SAFETY: We own a refcount, so the pointer is not dangling. +        let is_zero = unsafe { bindings::refcount_dec_and_test(refcount) }; +        if is_zero { +            // SAFETY: We have exclusive access to the arc, so we can perform unsynchronized +            // accesses to the refcount. +            unsafe { core::ptr::write(refcount, bindings::REFCOUNT_INIT(1)) }; + +            // INVARIANT: We own the only refcount to this arc, so we may create a `UniqueArc`. We +            // must pin the `UniqueArc` because the values was previously in an `Arc`, and they pin +            // their values. +            Some(Pin::from(UniqueArc { +                inner: ManuallyDrop::into_inner(me), +            })) +        } else { +            None +        } +    }  }  impl<T: 'static> ForeignOwnable for Arc<T> { @@ -387,7 +468,7 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {  ///     e.into()  /// }  /// -/// let obj = Arc::try_new(Example)?; +/// let obj = Arc::new(Example, GFP_KERNEL)?;  /// let cloned = do_something(obj.as_arc_borrow());  ///  /// // Assert that both `obj` and `cloned` point to the same underlying object. @@ -411,7 +492,7 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {  ///     }  /// }  /// -/// let obj = Arc::try_new(Example { a: 10, b: 20 })?; +/// let obj = Arc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?;  /// obj.as_arc_borrow().use_reference();  /// # Ok::<(), Error>(())  /// ``` @@ -453,6 +534,27 @@ impl<T: ?Sized> ArcBorrow<'_, T> {              _p: PhantomData,          }      } + +    /// Creates an [`ArcBorrow`] to an [`Arc`] that has previously been deconstructed with +    /// [`Arc::into_raw`]. +    /// +    /// # Safety +    /// +    /// * The provided pointer must originate from a call to [`Arc::into_raw`]. +    /// * For the duration of the lifetime annotated on this `ArcBorrow`, the reference count must +    ///   not hit zero. +    /// * For the duration of the lifetime annotated on this `ArcBorrow`, there must not be a +    ///   [`UniqueArc`] reference to this value. +    pub unsafe fn from_raw(ptr: *const T) -> Self { +        // SAFETY: The caller promises that this pointer originates from a call to `into_raw` on an +        // `Arc` that is still valid. +        let ptr = unsafe { ArcInner::container_of(ptr) }; + +        // SAFETY: The caller promises that the value remains valid since the reference count must +        // not hit zero, and no mutable reference will be created since that would involve a +        // `UniqueArc`. 
+        unsafe { Self::new(ptr) } +    }  }  impl<T: ?Sized> From<ArcBorrow<'_, T>> for Arc<T> { @@ -499,7 +601,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> {  /// }  ///  /// fn test() -> Result<Arc<Example>> { -///     let mut x = UniqueArc::try_new(Example { a: 10, b: 20 })?; +///     let mut x = UniqueArc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?;  ///     x.a += 1;  ///     x.b += 1;  ///     Ok(x.into()) @@ -522,7 +624,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> {  /// }  ///  /// fn test() -> Result<Arc<Example>> { -///     let x = UniqueArc::try_new_uninit()?; +///     let x = UniqueArc::new_uninit(GFP_KERNEL)?;  ///     Ok(x.write(Example { a: 10, b: 20 }).into())  /// }  /// @@ -542,7 +644,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> {  /// }  ///  /// fn test() -> Result<Arc<Example>> { -///     let mut pinned = Pin::from(UniqueArc::try_new(Example { a: 10, b: 20 })?); +///     let mut pinned = Pin::from(UniqueArc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?);  ///     // We can modify `pinned` because it is `Unpin`.  ///     pinned.as_mut().a += 1;  ///     Ok(pinned.into()) @@ -556,21 +658,24 @@ pub struct UniqueArc<T: ?Sized> {  impl<T> UniqueArc<T> {      /// Tries to allocate a new [`UniqueArc`] instance. -    pub fn try_new(value: T) -> Result<Self, AllocError> { +    pub fn new(value: T, flags: Flags) -> Result<Self, AllocError> {          Ok(Self {              // INVARIANT: The newly-created object has a refcount of 1. -            inner: Arc::try_new(value)?, +            inner: Arc::new(value, flags)?,          })      }      /// Tries to allocate a new [`UniqueArc`] instance whose contents are not initialised yet. -    pub fn try_new_uninit() -> Result<UniqueArc<MaybeUninit<T>>, AllocError> { +    pub fn new_uninit(flags: Flags) -> Result<UniqueArc<MaybeUninit<T>>, AllocError> {          // INVARIANT: The refcount is initialised to a non-zero value. -        let inner = Box::try_init::<AllocError>(try_init!(ArcInner { -            // SAFETY: There are no safety requirements for this FFI call. -            refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }), -            data <- init::uninit::<T, AllocError>(), -        }? AllocError))?; +        let inner = Box::try_init::<AllocError>( +            try_init!(ArcInner { +                // SAFETY: There are no safety requirements for this FFI call. +                refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }), +                data <- init::uninit::<T, AllocError>(), +            }? AllocError), +            flags, +        )?;          Ok(UniqueArc {              // INVARIANT: The newly-created object has a refcount of 1.              // SAFETY: The pointer from the `Box` is valid. diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs index 0c3671caffeb..2b306afbe56d 100644 --- a/rust/kernel/sync/condvar.rs +++ b/rust/kernel/sync/condvar.rs @@ -7,7 +7,6 @@  use super::{lock::Backend, lock::Guard, LockClassKey};  use crate::{ -    bindings,      init::PinInit,      pin_init,      str::CStr, @@ -75,7 +74,7 @@ pub use new_condvar;  ///     Box::pin_init(pin_init!(Example {  ///         value <- new_mutex!(0),  ///         value_changed <- new_condvar!(), -///     })) +///     }), GFP_KERNEL)  /// }  /// ```  /// diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs index 5b5c8efe427a..f6c34ca4d819 100644 --- a/rust/kernel/sync/lock.rs +++ b/rust/kernel/sync/lock.rs @@ -6,7 +6,7 @@  //! spinlocks, raw spinlocks) to be provided with minimal effort.  
use super::LockClassKey; -use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard}; +use crate::{init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard};  use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned};  use macros::pin_data; diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs index ef4c4634d294..30632070ee67 100644 --- a/rust/kernel/sync/lock/mutex.rs +++ b/rust/kernel/sync/lock/mutex.rs @@ -4,8 +4,6 @@  //!  //! This module allows Rust code to use the kernel's `struct mutex`. -use crate::bindings; -  /// Creates a [`Mutex`] initialiser with the given name and a newly-created lock class.  ///  /// It uses the name if one is given, otherwise it generates one based on the file name and line @@ -60,7 +58,7 @@ pub use new_mutex;  /// }  ///  /// // Allocate a boxed `Example`. -/// let e = Box::pin_init(Example::new())?; +/// let e = Box::pin_init(Example::new(), GFP_KERNEL)?;  /// assert_eq!(e.c, 10);  /// assert_eq!(e.d.lock().a, 20);  /// assert_eq!(e.d.lock().b, 30); diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs index 0b22c635634f..ea5c5bc1ce12 100644 --- a/rust/kernel/sync/lock/spinlock.rs +++ b/rust/kernel/sync/lock/spinlock.rs @@ -4,8 +4,6 @@  //!  //! This module allows Rust code to use the kernel's `spinlock_t`. -use crate::bindings; -  /// Creates a [`SpinLock`] initialiser with the given name and a newly-created lock class.  ///  /// It uses the name if one is given, otherwise it generates one based on the file name and line @@ -58,7 +56,7 @@ pub use new_spinlock;  /// }  ///  /// // Allocate a boxed `Example`. -/// let e = Box::pin_init(Example::new())?; +/// let e = Box::pin_init(Example::new(), GFP_KERNEL)?;  /// assert_eq!(e.c, 10);  /// assert_eq!(e.d.lock().a, 20);  /// assert_eq!(e.d.lock().b, 30); diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs index ca6e7e31d71c..55dff7e088bf 100644 --- a/rust/kernel/task.rs +++ b/rust/kernel/task.rs @@ -4,7 +4,7 @@  //!  //! C header: [`include/linux/sched.h`](srctree/include/linux/sched.h). -use crate::{bindings, types::Opaque}; +use crate::types::Opaque;  use core::{      ffi::{c_int, c_long, c_uint},      marker::PhantomData, diff --git a/rust/kernel/time.rs b/rust/kernel/time.rs index 25a896eed468..e3bb5e89f88d 100644 --- a/rust/kernel/time.rs +++ b/rust/kernel/time.rs @@ -4,6 +4,12 @@  //!  //! This module contains the kernel APIs related to time and timers that  //! have been ported or wrapped for usage by Rust code in the kernel. +//! +//! C header: [`include/linux/jiffies.h`](srctree/include/linux/jiffies.h). +//! C header: [`include/linux/ktime.h`](srctree/include/linux/ktime.h). + +/// The number of nanoseconds per millisecond. +pub const NSEC_PER_MSEC: i64 = bindings::NSEC_PER_MSEC as i64;  /// The time unit of Linux kernel. One jiffy equals (1/HZ) second.  pub type Jiffies = core::ffi::c_ulong; @@ -18,3 +24,60 @@ pub fn msecs_to_jiffies(msecs: Msecs) -> Jiffies {      // matter what the argument is.      unsafe { bindings::__msecs_to_jiffies(msecs) }  } + +/// A Rust wrapper around a `ktime_t`. +#[repr(transparent)] +#[derive(Copy, Clone)] +pub struct Ktime { +    inner: bindings::ktime_t, +} + +impl Ktime { +    /// Create a `Ktime` from a raw `ktime_t`. +    #[inline] +    pub fn from_raw(inner: bindings::ktime_t) -> Self { +        Self { inner } +    } + +    /// Get the current time using `CLOCK_MONOTONIC`. 
+    #[inline] +    pub fn ktime_get() -> Self { +        // SAFETY: It is always safe to call `ktime_get` outside of NMI context. +        Self::from_raw(unsafe { bindings::ktime_get() }) +    } + +    /// Divide the number of nanoseconds by a compile-time constant. +    #[inline] +    fn divns_constant<const DIV: i64>(self) -> i64 { +        self.to_ns() / DIV +    } + +    /// Returns the number of nanoseconds. +    #[inline] +    pub fn to_ns(self) -> i64 { +        self.inner +    } + +    /// Returns the number of milliseconds. +    #[inline] +    pub fn to_ms(self) -> i64 { +        self.divns_constant::<NSEC_PER_MSEC>() +    } +} + +/// Returns the number of milliseconds between two ktimes. +#[inline] +pub fn ktime_ms_delta(later: Ktime, earlier: Ktime) -> i64 { +    (later - earlier).to_ms() +} + +impl core::ops::Sub for Ktime { +    type Output = Ktime; + +    #[inline] +    fn sub(self, other: Ktime) -> Ktime { +        Self { +            inner: self.inner - other.inner, +        } +    } +} diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs index aa77bad9bce4..2e7c9008621f 100644 --- a/rust/kernel/types.rs +++ b/rust/kernel/types.rs @@ -157,11 +157,11 @@ impl ForeignOwnable for () {  ///     let mut vec =  ///         ScopeGuard::new_with_data(Vec::new(), |v| pr_info!("vec had {} elements\n", v.len()));  /// -///     vec.try_push(10u8)?; +///     vec.push(10u8, GFP_KERNEL)?;  ///     if arg {  ///         return Ok(());  ///     } -///     vec.try_push(20u8)?; +///     vec.push(20u8, GFP_KERNEL)?;  ///     Ok(())  /// }  /// @@ -270,7 +270,7 @@ impl<T> Opaque<T> {      }      /// Returns a raw pointer to the opaque data. -    pub fn get(&self) -> *mut T { +    pub const fn get(&self) -> *mut T {          UnsafeCell::get(&self.value).cast::<T>()      } diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs index 480cb292e7c2..1cec63a2aea8 100644 --- a/rust/kernel/workqueue.rs +++ b/rust/kernel/workqueue.rs @@ -33,7 +33,6 @@  //! we do not need to specify ids for the fields.  //!  //! ``` -//! use kernel::prelude::*;  //! use kernel::sync::Arc;  //! use kernel::workqueue::{self, impl_has_work, new_work, Work, WorkItem};  //! @@ -53,7 +52,7 @@  //!         Arc::pin_init(pin_init!(MyStruct {  //!             value,  //!             work <- new_work!("MyStruct::work"), -//!         })) +//!         }), GFP_KERNEL)  //!     }  //! }  //! @@ -75,7 +74,6 @@  //! The following example shows how multiple `work_struct` fields can be used:  //!  //! ``` -//! use kernel::prelude::*;  //! use kernel::sync::Arc;  //! use kernel::workqueue::{self, impl_has_work, new_work, Work, WorkItem};  //! @@ -101,7 +99,7 @@  //!             value_2,  //!             work_1 <- new_work!("MyStruct::work_1"),  //!             work_2 <- new_work!("MyStruct::work_2"), -//!         })) +//!         }), GFP_KERNEL)  //!     }  //! }  //! @@ -132,11 +130,9 @@  //!  //! C header: [`include/linux/workqueue.h`](srctree/include/linux/workqueue.h) -use crate::{bindings, prelude::*, sync::Arc, sync::LockClassKey, types::Opaque}; -use alloc::alloc::AllocError; -use alloc::boxed::Box; +use crate::alloc::{AllocError, Flags}; +use crate::{prelude::*, sync::Arc, sync::LockClassKey, types::Opaque};  use core::marker::PhantomData; -use core::pin::Pin;  /// Creates a [`Work`] initialiser with the given name and a newly-created lock class.  #[macro_export] @@ -210,13 +206,17 @@ impl Queue {      /// Tries to spawn the given function or closure as a work item.      
///      /// This method can fail because it allocates memory to store the work item. -    pub fn try_spawn<T: 'static + Send + FnOnce()>(&self, func: T) -> Result<(), AllocError> { +    pub fn try_spawn<T: 'static + Send + FnOnce()>( +        &self, +        flags: Flags, +        func: T, +    ) -> Result<(), AllocError> {          let init = pin_init!(ClosureWork {              work <- new_work!("Queue::try_spawn"),              func: Some(func),          }); -        self.enqueue(Box::pin_init(init).map_err(|_| AllocError)?); +        self.enqueue(Box::pin_init(init, flags).map_err(|_| AllocError)?);          Ok(())      }  } @@ -346,8 +346,10 @@ pub trait WorkItem<const ID: u64 = 0> {  /// This is a helper type used to associate a `work_struct` with the [`WorkItem`] that uses it.  ///  /// [`run`]: WorkItemPointer::run +#[pin_data]  #[repr(transparent)]  pub struct Work<T: ?Sized, const ID: u64 = 0> { +    #[pin]      work: Opaque<bindings::work_struct>,      _inner: PhantomData<T>,  } @@ -369,21 +371,22 @@ impl<T: ?Sized, const ID: u64> Work<T, ID> {      where          T: WorkItem<ID>,      { -        // SAFETY: The `WorkItemPointer` implementation promises that `run` can be used as the work -        // item function. -        unsafe { -            kernel::init::pin_init_from_closure(move |slot| { -                let slot = Self::raw_get(slot); -                bindings::init_work_with_key( -                    slot, -                    Some(T::Pointer::run), -                    false, -                    name.as_char_ptr(), -                    key.as_ptr(), -                ); -                Ok(()) -            }) -        } +        pin_init!(Self { +            work <- Opaque::ffi_init(|slot| { +                // SAFETY: The `WorkItemPointer` implementation promises that `run` can be used as +                // the work item function. +                unsafe { +                    bindings::init_work_with_key( +                        slot, +                        Some(T::Pointer::run), +                        false, +                        name.as_char_ptr(), +                        key.as_ptr(), +                    ) +                } +            }), +            _inner: PhantomData, +        })      }      /// Get a pointer to the inner `work_struct`. @@ -408,7 +411,6 @@ impl<T: ?Sized, const ID: u64> Work<T, ID> {  /// like this:  ///  /// ```no_run -/// use kernel::prelude::*;  /// use kernel::workqueue::{impl_has_work, Work};  ///  /// struct MyWorkItem { |
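The allocating constructors in this series take the GFP flags explicitly, so the caller picks the allocation context at each call site. A minimal sketch of the updated calling convention, assuming the prelude exports `GFP_KERNEL` and the usual `Result`/`Error` aliases as the doc-tests above do:

```rust
use kernel::prelude::*; // assumed to provide `GFP_KERNEL`, `Result` and `Error`
use kernel::sync::{Arc, UniqueArc};

fn build() -> Result<Arc<u32>> {
    // Shared, immediately initialised allocation.
    let shared = Arc::new(42, GFP_KERNEL)?;

    // Unique, two-step initialisation: allocate first, write the value later.
    let unique = UniqueArc::new_uninit(GFP_KERNEL)?;
    let unique = unique.write(*shared);

    // A `UniqueArc` converts into an `Arc` once exclusive access is no longer needed.
    Ok(unique.into())
}
```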
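`Arc::into_unique_or_drop` either hands back exclusive access as `Pin<UniqueArc<T>>` or merely drops the caller's refcount without running the destructor. A sketch of the typical pattern; the helper name is hypothetical, and because `u32` is `Unpin` the returned `Pin` does not restrict mutation:

```rust
use kernel::sync::Arc;

fn reset_if_last(counter: Arc<u32>) {
    // Either we held the last refcount and regain exclusive access, or the refcount is
    // decremented and `None` is returned without the value's destructor running here.
    if let Some(mut unique) = counter.into_unique_or_drop() {
        *unique = 0;
    }
}
```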
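`ArcBorrow::from_raw` is the borrowing counterpart to `Arc::from_raw`: code that received a pointer from `Arc::into_raw` can touch the value without consuming the refcount. A hedged round-trip sketch under the safety requirements listed above:

```rust
use kernel::prelude::*;
use kernel::sync::{Arc, ArcBorrow};

fn roundtrip() -> Result {
    let arc = Arc::new(10u32, GFP_KERNEL)?;

    // Transfer the refcount into a raw pointer, e.g. to pass it through C code.
    let raw: *const u32 = Arc::into_raw(arc);

    // SAFETY: `raw` came from `Arc::into_raw`, and the refcount transferred above keeps the
    // value alive (and away from any `UniqueArc`) for the lifetime of this borrow.
    let borrow: ArcBorrow<'_, u32> = unsafe { ArcBorrow::from_raw(raw) };
    assert_eq!(*borrow, 10);

    // SAFETY: Reclaim the refcount so it is not leaked; `raw` is not used again afterwards.
    drop(unsafe { Arc::from_raw(raw) });
    Ok(())
}
```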
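With `Ktime::ktime_get` and `ktime_ms_delta`, measuring an elapsed interval on the monotonic clock reduces to two reads and a subtraction; a small illustrative helper (not part of the patch):

```rust
use kernel::time::{ktime_ms_delta, Ktime};

/// Runs `f` and returns its result together with the elapsed time in milliseconds,
/// measured on the monotonic clock (`CLOCK_MONOTONIC`).
fn timed<T>(f: impl FnOnce() -> T) -> (T, i64) {
    let start = Ktime::ktime_get();
    let ret = f();
    let elapsed_ms = ktime_ms_delta(Ktime::ktime_get(), start);
    (ret, elapsed_ms)
}
```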
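`Queue::try_spawn` now threads the allocation flags through to the boxed closure it creates. A sketch of a caller, assuming the module's `system()` accessor for the system workqueue (not shown in this hunk):

```rust
use kernel::alloc::AllocError;
use kernel::prelude::*;
use kernel::workqueue;

fn log_later() -> Result<(), AllocError> {
    // The allocation flags come before the closure; the boxed work item is allocated
    // with them, and enqueueing only fails if that allocation fails.
    workqueue::system().try_spawn(GFP_KERNEL, || pr_info!("deferred hello\n"))
}
```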