Diffstat (limited to 'rust/kernel/sync')
-rw-r--r--  rust/kernel/sync/arc.rs            | 207
-rw-r--r--  rust/kernel/sync/condvar.rs        | 133
-rw-r--r--  rust/kernel/sync/lock.rs           |  21
-rw-r--r--  rust/kernel/sync/lock/mutex.rs     |   9
-rw-r--r--  rust/kernel/sync/lock/spinlock.rs  |  11
-rw-r--r--  rust/kernel/sync/locked_by.rs      |   7
6 files changed, 282 insertions(+), 106 deletions(-)
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index 77cdbcf7bd2e..3673496c2363 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -16,7 +16,7 @@
//! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html
use crate::{
- bindings,
+ alloc::{box_ext::BoxExt, AllocError, Flags},
error::{self, Error},
init::{self, InPlaceInit, Init, PinInit},
try_init,
@@ -24,13 +24,13 @@ use crate::{
};
use alloc::boxed::Box;
use core::{
- alloc::{AllocError, Layout},
+ alloc::Layout,
fmt,
marker::{PhantomData, Unsize},
mem::{ManuallyDrop, MaybeUninit},
ops::{Deref, DerefMut},
pin::Pin,
- ptr::{NonNull, Pointee},
+ ptr::NonNull,
};
use macros::pin_data;
@@ -56,8 +56,8 @@ mod std_vendor;
/// b: u32,
/// }
///
-/// // Create a ref-counted instance of `Example`.
-/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
+/// // Create a refcounted instance of `Example`.
+/// let obj = Arc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?;
///
/// // Get a new pointer to `obj` and increment the refcount.
/// let cloned = obj.clone();
@@ -96,7 +96,7 @@ mod std_vendor;
/// }
/// }
///
-/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
+/// let obj = Arc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?;
/// obj.use_reference();
/// obj.take_over();
/// # Ok::<(), Error>(())
@@ -119,7 +119,7 @@ mod std_vendor;
/// impl MyTrait for Example {}
///
/// // `obj` has type `Arc<Example>`.
-/// let obj: Arc<Example> = Arc::try_new(Example)?;
+/// let obj: Arc<Example> = Arc::new(Example, GFP_KERNEL)?;
///
/// // `coerced` has type `Arc<dyn MyTrait>`.
/// let coerced: Arc<dyn MyTrait> = obj;
@@ -137,6 +137,39 @@ struct ArcInner<T: ?Sized> {
data: T,
}
+impl<T: ?Sized> ArcInner<T> {
+ /// Converts a pointer to the contents of an [`Arc`] into a pointer to the [`ArcInner`].
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must have been returned by a previous call to [`Arc::into_raw`], and the `Arc` must
+ /// not yet have been destroyed.
+ unsafe fn container_of(ptr: *const T) -> NonNull<ArcInner<T>> {
+ let refcount_layout = Layout::new::<bindings::refcount_t>();
+ // SAFETY: The caller guarantees that the pointer is valid.
+ let val_layout = Layout::for_value(unsafe { &*ptr });
+ // SAFETY: We're computing the layout of a real struct that existed when compiling this
+ // binary, so its layout is not so large that it can trigger arithmetic overflow.
+ let val_offset = unsafe { refcount_layout.extend(val_layout).unwrap_unchecked().1 };
+
+ // Pointer casts leave the metadata unchanged. This is okay because the metadata of `T` and
+ // `ArcInner<T>` is the same since `ArcInner` is a struct with `T` as its last field.
+ //
+ // This is documented at:
+ // <https://doc.rust-lang.org/std/ptr/trait.Pointee.html>.
+ let ptr = ptr as *const ArcInner<T>;
+
+ // SAFETY: The pointer is in-bounds of an allocation both before and after offsetting the
+ // pointer, since it originates from a previous call to `Arc::into_raw` on an `Arc` that is
+ // still valid.
+ let ptr = unsafe { ptr.byte_sub(val_offset) };
+
+ // SAFETY: The pointer can't be null since you can't have an `ArcInner<T>` value at the null
+ // address.
+ unsafe { NonNull::new_unchecked(ptr.cast_mut()) }
+ }
+}
+
// This is to allow [`Arc`] (and variants) to be used as the type of `self`.
impl<T: ?Sized> core::ops::Receiver for Arc<T> {}
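The `container_of` helper above recovers the `ArcInner<T>` pointer by subtracting the offset of the `data` field, computed with `Layout::extend` from the refcount's layout. A standalone sketch of the same computation, using a plain `i32` as a stand-in for `refcount_t` and a `Sized` payload for simplicity (the kernel helper instead uses `Layout::for_value` so it also works for unsized `T`; names here are illustrative, not part of the patch):

```
use core::alloc::Layout;

#[repr(C)]
struct Inner<T> {
    refcount: i32, // stand-in for the kernel's `refcount_t`
    data: T,
}

/// Recovers a pointer to `Inner<T>` from a pointer to its `data` field.
///
/// # Safety
///
/// `ptr` must point at the `data` field of a live `Inner<T>`.
unsafe fn container_of<T>(ptr: *const T) -> *const Inner<T> {
    let refcount_layout = Layout::new::<i32>();
    let val_layout = Layout::new::<T>();
    // For a `#[repr(C)]` struct, the offset of `data` is exactly what
    // `Layout::extend` reports when appending it after `refcount`.
    let (_, offset) = refcount_layout.extend(val_layout).unwrap();
    // Walk back from the field to the start of the containing struct.
    unsafe { ptr.cast::<u8>().sub(offset).cast::<Inner<T>>() }
}
```

Note that the pointer cast in the real helper preserves `T`'s metadata, which is why it can stay fully `?Sized`-generic without the `Pointee` machinery the old `from_raw` needed.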
@@ -162,7 +195,7 @@ unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
impl<T> Arc<T> {
/// Constructs a new reference counted instance of `T`.
- pub fn try_new(contents: T) -> Result<Self, AllocError> {
+ pub fn new(contents: T, flags: Flags) -> Result<Self, AllocError> {
// INVARIANT: The refcount is initialised to a non-zero value.
let value = ArcInner {
// SAFETY: There are no safety requirements for this FFI call.
@@ -170,7 +203,7 @@ impl<T> Arc<T> {
data: contents,
};
- let inner = Box::try_new(value)?;
+ let inner = <Box<_> as BoxExt<_>>::new(value, flags)?;
// SAFETY: We just created `inner` with a reference count of 1, which is owned by the new
// `Arc` object.
@@ -181,22 +214,22 @@ impl<T> Arc<T> {
///
/// If `T: !Unpin` it will not be able to move afterwards.
#[inline]
- pub fn pin_init<E>(init: impl PinInit<T, E>) -> error::Result<Self>
+ pub fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> error::Result<Self>
where
Error: From<E>,
{
- UniqueArc::pin_init(init).map(|u| u.into())
+ UniqueArc::pin_init(init, flags).map(|u| u.into())
}
/// Use the given initializer to in-place initialize a `T`.
///
/// This is equivalent to [`Arc<T>::pin_init`], since an [`Arc`] is always pinned.
#[inline]
- pub fn init<E>(init: impl Init<T, E>) -> error::Result<Self>
+ pub fn init<E>(init: impl Init<T, E>, flags: Flags) -> error::Result<Self>
where
Error: From<E>,
{
- UniqueArc::init(init).map(|u| u.into())
+ UniqueArc::init(init, flags).map(|u| u.into())
}
}
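The net effect of threading `Flags` through these constructors is that the allocation context becomes explicit at every call site. A hedged sketch of the resulting call sites, assuming `GFP_ATOMIC` is available alongside `GFP_KERNEL` among the kernel's allocation flags:

```
struct Example {
    a: u32,
    b: u32,
}

// In sleepable (process) context, `GFP_KERNEL` lets the allocator sleep to
// satisfy the request.
let obj = Arc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?;

// In atomic context, `GFP_ATOMIC` forbids sleeping; under memory pressure the
// allocation may fail instead, which the `Result` surfaces to the caller.
let obj2 = Arc::new(Example { a: 10, b: 20 }, GFP_ATOMIC)?;
# Ok::<(), Error>(())
```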
@@ -232,29 +265,13 @@ impl<T: ?Sized> Arc<T> {
/// `ptr` must have been returned by a previous call to [`Arc::into_raw`]. Additionally, it
/// must not be called more than once for each previous call to [`Arc::into_raw`].
pub unsafe fn from_raw(ptr: *const T) -> Self {
- let refcount_layout = Layout::new::<bindings::refcount_t>();
- // SAFETY: The caller guarantees that the pointer is valid.
- let val_layout = Layout::for_value(unsafe { &*ptr });
- // SAFETY: We're computing the layout of a real struct that existed when compiling this
- // binary, so its layout is not so large that it can trigger arithmetic overflow.
- let val_offset = unsafe { refcount_layout.extend(val_layout).unwrap_unchecked().1 };
-
- let metadata: <T as Pointee>::Metadata = core::ptr::metadata(ptr);
- // SAFETY: The metadata of `T` and `ArcInner<T>` is the same because `ArcInner` is a struct
- // with `T` as its last field.
- //
- // This is documented at:
- // <https://doc.rust-lang.org/std/ptr/trait.Pointee.html>.
- let metadata: <ArcInner<T> as Pointee>::Metadata =
- unsafe { core::mem::transmute_copy(&metadata) };
- // SAFETY: The pointer is in-bounds of an allocation both before and after offsetting the
- // pointer, since it originates from a previous call to `Arc::into_raw` and is still valid.
- let ptr = unsafe { (ptr as *mut u8).sub(val_offset) as *mut () };
- let ptr = core::ptr::from_raw_parts_mut(ptr, metadata);
+ // SAFETY: The caller promises that this pointer originates from a call to `into_raw` on an
+ // `Arc` that is still valid.
+ let ptr = unsafe { ArcInner::container_of(ptr) };
// SAFETY: By the safety requirements we know that `ptr` came from `Arc::into_raw`, so the
// reference count held then will be owned by the new `Arc` object.
- unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
+ unsafe { Self::from_inner(ptr) }
}
/// Returns an [`ArcBorrow`] from the given [`Arc`].
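Together with `Arc::into_raw`, the rewritten `from_raw` supports the usual round trip through C code; a sketch in the style of the doc examples above:

```
let arc = Arc::new(42, GFP_KERNEL)?;
let ptr = Arc::into_raw(arc);

// ... the raw pointer may now be stored in, or passed through, C data ...

// SAFETY: `ptr` came from `Arc::into_raw` above and is consumed exactly once.
let arc = unsafe { Arc::from_raw(ptr) };
assert_eq!(*arc, 42);
# Ok::<(), Error>(())
```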
@@ -273,6 +290,68 @@ impl<T: ?Sized> Arc<T> {
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
core::ptr::eq(this.ptr.as_ptr(), other.ptr.as_ptr())
}
+
+ /// Converts this [`Arc`] into a [`UniqueArc`], or destroys it if it is not unique.
+ ///
+ /// When this destroys the `Arc`, it does so while properly avoiding races. This means that
+ /// this method will never call the destructor of the value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::sync::{Arc, UniqueArc};
+ ///
+ /// let arc = Arc::new(42, GFP_KERNEL)?;
+ /// let unique_arc = arc.into_unique_or_drop();
+ ///
+ /// // The above conversion should succeed since the refcount of `arc` is 1.
+ /// assert!(unique_arc.is_some());
+ ///
+ /// assert_eq!(*(unique_arc.unwrap()), 42);
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```
+ ///
+ /// ```
+ /// use kernel::sync::{Arc, UniqueArc};
+ ///
+ /// let arc = Arc::new(42, GFP_KERNEL)?;
+ /// let another = arc.clone();
+ ///
+ /// let unique_arc = arc.into_unique_or_drop();
+ ///
+ /// // The above conversion should fail since the refcount of `arc` is >1.
+ /// assert!(unique_arc.is_none());
+ ///
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn into_unique_or_drop(self) -> Option<Pin<UniqueArc<T>>> {
+ // We will manually manage the refcount in this method, so we disable the destructor.
+ let me = ManuallyDrop::new(self);
+ // SAFETY: We own a refcount, so the pointer is still valid.
+ let refcount = unsafe { me.ptr.as_ref() }.refcount.get();
+
+ // If the refcount reaches a non-zero value, then we have destroyed this `Arc` and will
+ // return without further touching the `Arc`. If the refcount reaches zero, then there are
+ // no other arcs, and we can create a `UniqueArc`.
+ //
+ // SAFETY: We own a refcount, so the pointer is not dangling.
+ let is_zero = unsafe { bindings::refcount_dec_and_test(refcount) };
+ if is_zero {
+ // SAFETY: We have exclusive access to the arc, so we can perform unsynchronized
+ // accesses to the refcount.
+ unsafe { core::ptr::write(refcount, bindings::REFCOUNT_INIT(1)) };
+
+ // INVARIANT: We own the only refcount to this arc, so we may create a `UniqueArc`. We
+ // must pin the `UniqueArc` because the value was previously in an `Arc`, and
+ // `Arc`s pin their values.
+ Some(Pin::from(UniqueArc {
+ inner: ManuallyDrop::into_inner(me),
+ }))
+ } else {
+ None
+ }
+ }
}
impl<T: 'static> ForeignOwnable for Arc<T> {
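Beyond the doc examples above, the typical use of `into_unique_or_drop` is to reclaim mutable access once all other references are gone. A sketch for an `Unpin` payload, where the returned `Pin` still permits direct mutation:

```
let arc = Arc::new(42, GFP_KERNEL)?;

if let Some(mut unique) = arc.into_unique_or_drop() {
    // `u32` is `Unpin`, so the pinned `UniqueArc` can be mutated directly.
    *unique = 43;
    // Convert back into a shareable `Arc` via `From<Pin<UniqueArc<T>>>`.
    let arc: Arc<u32> = unique.into();
    assert_eq!(*arc, 43);
}
# Ok::<(), Error>(())
```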
@@ -365,12 +444,12 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {
/// A borrowed reference to an [`Arc`] instance.
///
/// For cases when one doesn't ever need to increment the refcount on the allocation, it is simpler
-/// to use just `&T`, which we can trivially get from an `Arc<T>` instance.
+/// to use just `&T`, which we can trivially get from an [`Arc<T>`] instance.
///
/// However, when one may need to increment the refcount, it is preferable to use an `ArcBorrow<T>`
/// over `&Arc<T>` because the latter results in a double-indirection: a pointer (shared reference)
-/// to a pointer (`Arc<T>`) to the object (`T`). An [`ArcBorrow`] eliminates this double
-/// indirection while still allowing one to increment the refcount and getting an `Arc<T>` when/if
+/// to a pointer ([`Arc<T>`]) to the object (`T`). An [`ArcBorrow`] eliminates this double
+/// indirection while still allowing one to increment the refcount and getting an [`Arc<T>`] when/if
/// needed.
///
/// # Invariants
@@ -389,7 +468,7 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {
/// e.into()
/// }
///
-/// let obj = Arc::try_new(Example)?;
+/// let obj = Arc::new(Example, GFP_KERNEL)?;
/// let cloned = do_something(obj.as_arc_borrow());
///
/// // Assert that both `obj` and `cloned` point to the same underlying object.
@@ -413,7 +492,7 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {
/// }
/// }
///
-/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
+/// let obj = Arc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?;
/// obj.as_arc_borrow().use_reference();
/// # Ok::<(), Error>(())
/// ```
@@ -455,6 +534,27 @@ impl<T: ?Sized> ArcBorrow<'_, T> {
_p: PhantomData,
}
}
+
+ /// Creates an [`ArcBorrow`] to an [`Arc`] that has previously been deconstructed with
+ /// [`Arc::into_raw`].
+ ///
+ /// # Safety
+ ///
+ /// * The provided pointer must originate from a call to [`Arc::into_raw`].
+ /// * For the duration of the lifetime annotated on this `ArcBorrow`, the reference count must
+ /// not hit zero.
+ /// * For the duration of the lifetime annotated on this `ArcBorrow`, there must not be a
+ /// [`UniqueArc`] reference to this value.
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ // SAFETY: The caller promises that this pointer originates from a call to `into_raw` on an
+ // `Arc` that is still valid.
+ let ptr = unsafe { ArcInner::container_of(ptr) };
+
+ // SAFETY: The caller promises that the value remains valid since the reference count must
+ // not hit zero, and no mutable reference will be created since that would involve a
+ // `UniqueArc`.
+ unsafe { Self::new(ptr) }
+ }
}
impl<T: ?Sized> From<ArcBorrow<'_, T>> for Arc<T> {
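`ArcBorrow::from_raw` is the borrowing counterpart of `Arc::from_raw`: it lets a callback that receives a raw pointer take a temporary reference without consuming the refcount that `into_raw` transferred. A hedged sketch:

```
let arc = Arc::new(42, GFP_KERNEL)?;
let ptr = Arc::into_raw(arc);

// SAFETY: `ptr` came from `Arc::into_raw`, the refcount cannot hit zero while
// `borrow` lives, and no `UniqueArc` to this value exists.
let borrow = unsafe { ArcBorrow::from_raw(ptr) };
// The borrow can be upgraded to an owned `Arc`, leaving `ptr` intact.
let extra: Arc<i32> = borrow.into();
assert_eq!(*extra, 42);

// SAFETY: `ptr` is consumed exactly once, reclaiming the original refcount.
let arc = unsafe { Arc::from_raw(ptr) };
# Ok::<(), Error>(())
```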
@@ -501,7 +601,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> {
/// }
///
/// fn test() -> Result<Arc<Example>> {
-/// let mut x = UniqueArc::try_new(Example { a: 10, b: 20 })?;
+/// let mut x = UniqueArc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?;
/// x.a += 1;
/// x.b += 1;
/// Ok(x.into())
@@ -510,7 +610,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> {
/// # test().unwrap();
/// ```
///
-/// In the following example we first allocate memory for a ref-counted `Example` but we don't
+/// In the following example we first allocate memory for a refcounted `Example` but we don't
/// initialise it on allocation. We do initialise it later with a call to [`UniqueArc::write`],
/// followed by a conversion to `Arc<Example>`. This is particularly useful when allocation happens
/// in one context (e.g., sleepable) and initialisation in another (e.g., atomic):
@@ -524,7 +624,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> {
/// }
///
/// fn test() -> Result<Arc<Example>> {
-/// let x = UniqueArc::try_new_uninit()?;
+/// let x = UniqueArc::new_uninit(GFP_KERNEL)?;
/// Ok(x.write(Example { a: 10, b: 20 }).into())
/// }
///
@@ -544,7 +644,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> {
/// }
///
/// fn test() -> Result<Arc<Example>> {
-/// let mut pinned = Pin::from(UniqueArc::try_new(Example { a: 10, b: 20 })?);
+/// let mut pinned = Pin::from(UniqueArc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?);
/// // We can modify `pinned` because it is `Unpin`.
/// pinned.as_mut().a += 1;
/// Ok(pinned.into())
@@ -558,23 +658,26 @@ pub struct UniqueArc<T: ?Sized> {
impl<T> UniqueArc<T> {
/// Tries to allocate a new [`UniqueArc`] instance.
- pub fn try_new(value: T) -> Result<Self, AllocError> {
+ pub fn new(value: T, flags: Flags) -> Result<Self, AllocError> {
Ok(Self {
- // INVARIANT: The newly-created object has a ref-count of 1.
- inner: Arc::try_new(value)?,
+ // INVARIANT: The newly-created object has a refcount of 1.
+ inner: Arc::new(value, flags)?,
})
}
/// Tries to allocate a new [`UniqueArc`] instance whose contents are not initialised yet.
- pub fn try_new_uninit() -> Result<UniqueArc<MaybeUninit<T>>, AllocError> {
+ pub fn new_uninit(flags: Flags) -> Result<UniqueArc<MaybeUninit<T>>, AllocError> {
// INVARIANT: The refcount is initialised to a non-zero value.
- let inner = Box::try_init::<AllocError>(try_init!(ArcInner {
- // SAFETY: There are no safety requirements for this FFI call.
- refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
- data <- init::uninit::<T, AllocError>(),
- }? AllocError))?;
+ let inner = Box::try_init::<AllocError>(
+ try_init!(ArcInner {
+ // SAFETY: There are no safety requirements for this FFI call.
+ refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
+ data <- init::uninit::<T, AllocError>(),
+ }? AllocError),
+ flags,
+ )?;
Ok(UniqueArc {
- // INVARIANT: The newly-created object has a ref-count of 1.
+ // INVARIANT: The newly-created object has a refcount of 1.
// SAFETY: The pointer from the `Box` is valid.
inner: unsafe { Arc::from_inner(Box::leak(inner).into()) },
})
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
index b679b6f6dbeb..2b306afbe56d 100644
--- a/rust/kernel/sync/condvar.rs
+++ b/rust/kernel/sync/condvar.rs
@@ -6,8 +6,17 @@
//! variable.
use super::{lock::Backend, lock::Guard, LockClassKey};
-use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque};
+use crate::{
+ init::PinInit,
+ pin_init,
+ str::CStr,
+ task::{MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE, TASK_NORMAL, TASK_UNINTERRUPTIBLE},
+ time::Jiffies,
+ types::Opaque,
+};
+use core::ffi::{c_int, c_long};
use core::marker::PhantomPinned;
+use core::ptr;
use macros::pin_data;
/// Creates a [`CondVar`] initialiser with the given name and a newly-created lock class.
@@ -17,6 +26,7 @@ macro_rules! new_condvar {
$crate::sync::CondVar::new($crate::optional_name!($($name)?), $crate::static_lock_class!())
};
}
+pub use new_condvar;
/// A conditional variable.
///
@@ -34,8 +44,7 @@ macro_rules! new_condvar {
/// The following is an example of using a condvar with a mutex:
///
/// ```
-/// use kernel::sync::{CondVar, Mutex};
-/// use kernel::{new_condvar, new_mutex};
+/// use kernel::sync::{new_condvar, new_mutex, CondVar, Mutex};
///
/// #[pin_data]
/// pub struct Example {
@@ -50,7 +59,7 @@ macro_rules! new_condvar {
/// fn wait_for_value(e: &Example, v: u32) {
/// let mut guard = e.value.lock();
/// while *guard != v {
-/// e.value_changed.wait_uninterruptible(&mut guard);
+/// e.value_changed.wait(&mut guard);
/// }
/// }
///
@@ -65,18 +74,20 @@ macro_rules! new_condvar {
/// Box::pin_init(pin_init!(Example {
/// value <- new_mutex!(0),
/// value_changed <- new_condvar!(),
-/// }))
+/// }), GFP_KERNEL)
/// }
/// ```
///
-/// [`struct wait_queue_head`]: ../../../include/linux/wait.h
+/// [`struct wait_queue_head`]: srctree/include/linux/wait.h
#[pin_data]
pub struct CondVar {
#[pin]
- pub(crate) wait_list: Opaque<bindings::wait_queue_head>,
+ pub(crate) wait_queue_head: Opaque<bindings::wait_queue_head>,
/// A condvar needs to be pinned because it contains a [`struct list_head`] that is
/// self-referential, so it cannot be safely moved once it is initialised.
+ ///
+ /// [`struct list_head`]: srctree/include/linux/types.h
#[pin]
_pin: PhantomPinned,
}
@@ -96,71 +107,109 @@ impl CondVar {
_pin: PhantomPinned,
// SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
// static lifetimes so they live indefinitely.
- wait_list <- Opaque::ffi_init(|slot| unsafe {
+ wait_queue_head <- Opaque::ffi_init(|slot| unsafe {
bindings::__init_waitqueue_head(slot, name.as_char_ptr(), key.as_ptr())
}),
})
}
- fn wait_internal<T: ?Sized, B: Backend>(&self, wait_state: u32, guard: &mut Guard<'_, T, B>) {
+ fn wait_internal<T: ?Sized, B: Backend>(
+ &self,
+ wait_state: c_int,
+ guard: &mut Guard<'_, T, B>,
+ timeout_in_jiffies: c_long,
+ ) -> c_long {
let wait = Opaque::<bindings::wait_queue_entry>::uninit();
// SAFETY: `wait` points to valid memory.
unsafe { bindings::init_wait(wait.get()) };
- // SAFETY: Both `wait` and `wait_list` point to valid memory.
+ // SAFETY: Both `wait` and `wait_queue_head` point to valid memory.
unsafe {
- bindings::prepare_to_wait_exclusive(self.wait_list.get(), wait.get(), wait_state as _)
+ bindings::prepare_to_wait_exclusive(self.wait_queue_head.get(), wait.get(), wait_state)
};
- // SAFETY: No arguments, switches to another thread.
- guard.do_unlocked(|| unsafe { bindings::schedule() });
+ // SAFETY: Switches to another thread. The timeout can be any number.
+ let ret = guard.do_unlocked(|| unsafe { bindings::schedule_timeout(timeout_in_jiffies) });
+
+ // SAFETY: Both `wait` and `wait_queue_head` point to valid memory.
+ unsafe { bindings::finish_wait(self.wait_queue_head.get(), wait.get()) };
- // SAFETY: Both `wait` and `wait_list` point to valid memory.
- unsafe { bindings::finish_wait(self.wait_list.get(), wait.get()) };
+ ret
}
- /// Releases the lock and waits for a notification in interruptible mode.
+ /// Releases the lock and waits for a notification in uninterruptible mode.
///
/// Atomically releases the given lock (whose ownership is proven by the guard) and puts the
/// thread to sleep, reacquiring the lock on wake up. It wakes up when notified by
- /// [`CondVar::notify_one`] or [`CondVar::notify_all`], or when the thread receives a signal.
- /// It may also wake up spuriously.
+ /// [`CondVar::notify_one`] or [`CondVar::notify_all`]. Note that it may also wake up
+ /// spuriously.
+ pub fn wait<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) {
+ self.wait_internal(TASK_UNINTERRUPTIBLE, guard, MAX_SCHEDULE_TIMEOUT);
+ }
+
+ /// Releases the lock and waits for a notification in interruptible mode.
+ ///
+ /// Similar to [`CondVar::wait`], except that the wait is interruptible. That is, the thread may
+ /// wake up due to signals. It may also wake up spuriously.
///
/// Returns whether there is a signal pending.
- #[must_use = "wait returns if a signal is pending, so the caller must check the return value"]
- pub fn wait<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) -> bool {
- self.wait_internal(bindings::TASK_INTERRUPTIBLE, guard);
+ #[must_use = "wait_interruptible returns if a signal is pending, so the caller must check the return value"]
+ pub fn wait_interruptible<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) -> bool {
+ self.wait_internal(TASK_INTERRUPTIBLE, guard, MAX_SCHEDULE_TIMEOUT);
crate::current!().signal_pending()
}
- /// Releases the lock and waits for a notification in uninterruptible mode.
+ /// Releases the lock and waits for a notification in interruptible mode.
///
- /// Similar to [`CondVar::wait`], except that the wait is not interruptible. That is, the
- /// thread won't wake up due to signals. It may, however, wake up supirously.
- pub fn wait_uninterruptible<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) {
- self.wait_internal(bindings::TASK_UNINTERRUPTIBLE, guard)
+ /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the
+ /// thread to sleep. It wakes up when notified by [`CondVar::notify_one`] or
+ /// [`CondVar::notify_all`], or when a timeout occurs, or when the thread receives a signal.
+ #[must_use = "wait_interruptible_timeout returns if a signal is pending, so the caller must check the return value"]
+ pub fn wait_interruptible_timeout<T: ?Sized, B: Backend>(
+ &self,
+ guard: &mut Guard<'_, T, B>,
+ jiffies: Jiffies,
+ ) -> CondVarTimeoutResult {
+ let jiffies = jiffies.try_into().unwrap_or(MAX_SCHEDULE_TIMEOUT);
+ let res = self.wait_internal(TASK_INTERRUPTIBLE, guard, jiffies);
+
+ match (res as Jiffies, crate::current!().signal_pending()) {
+ (jiffies, true) => CondVarTimeoutResult::Signal { jiffies },
+ (0, false) => CondVarTimeoutResult::Timeout,
+ (jiffies, false) => CondVarTimeoutResult::Woken { jiffies },
+ }
}
- /// Calls the kernel function to notify the appropriate number of threads with the given flags.
- fn notify(&self, count: i32, flags: u32) {
- // SAFETY: `wait_list` points to valid memory.
+ /// Calls the kernel function to notify the appropriate number of threads.
+ fn notify(&self, count: c_int) {
+ // SAFETY: `wait_queue_head` points to valid memory.
unsafe {
bindings::__wake_up(
- self.wait_list.get(),
- bindings::TASK_NORMAL,
+ self.wait_queue_head.get(),
+ TASK_NORMAL,
count,
- flags as _,
+ ptr::null_mut(),
)
};
}
+ /// Calls the kernel function to notify one thread synchronously.
+ ///
+ /// This method behaves like `notify_one`, except that it hints to the scheduler that the
+ /// current thread is about to go to sleep, so it should schedule the target thread on the same
+ /// CPU.
+ pub fn notify_sync(&self) {
+ // SAFETY: `wait_queue_head` points to valid memory.
+ unsafe { bindings::__wake_up_sync(self.wait_queue_head.get(), TASK_NORMAL) };
+ }
+
/// Wakes a single waiter up, if any.
///
/// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
/// completely (as opposed to automatically waking up the next waiter).
pub fn notify_one(&self) {
- self.notify(1, 0);
+ self.notify(1);
}
/// Wakes all waiters up, if any.
@@ -168,6 +217,22 @@ impl CondVar {
/// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
/// completely (as opposed to automatically waking up the next waiter).
pub fn notify_all(&self) {
- self.notify(0, 0);
+ self.notify(0);
}
}
+
+/// The return type of [`CondVar::wait_interruptible_timeout`].
+pub enum CondVarTimeoutResult {
+ /// The timeout was reached.
+ Timeout,
+ /// Somebody woke us up.
+ Woken {
+ /// Remaining sleep duration.
+ jiffies: Jiffies,
+ },
+ /// A signal occurred.
+ Signal {
+ /// Remaining sleep duration.
+ jiffies: Jiffies,
+ },
+}
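A hedged sketch of consuming the new timeout API, reusing the `Example` type from the condvar doc example above and `ETIMEDOUT`/`EINTR` from the kernel's error codes; the enum forces callers to distinguish the three wake-up reasons:

```
fn wait_for_value_with_timeout(e: &Example, v: u32, mut timeout: Jiffies) -> Result {
    let mut guard = e.value.lock();
    while *guard != v {
        match e.value_changed.wait_interruptible_timeout(&mut guard, timeout) {
            // Woken by a notify (or spuriously): re-check the condition with
            // the remaining sleep budget.
            CondVarTimeoutResult::Woken { jiffies } => timeout = jiffies,
            // The timeout elapsed without the condition becoming true.
            CondVarTimeoutResult::Timeout => return Err(ETIMEDOUT),
            // A signal is pending; report it to the caller.
            CondVarTimeoutResult::Signal { .. } => return Err(EINTR),
        }
    }
    Ok(())
}
```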
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index f12a684bc957..f6c34ca4d819 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -6,7 +6,7 @@
//! spinlocks, raw spinlocks) to be provided with minimal effort.
use super::LockClassKey;
-use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard};
+use crate::{init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard};
use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned};
use macros::pin_data;
@@ -21,14 +21,21 @@ pub mod spinlock;
/// # Safety
///
/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
-/// is owned, that is, between calls to `lock` and `unlock`.
-/// - Implementers must also ensure that `relock` uses the same locking method as the original
-/// lock operation.
+/// is owned, that is, between calls to [`lock`] and [`unlock`].
+/// - Implementers must also ensure that [`relock`] uses the same locking method as the original
+/// lock operation.
+///
+/// [`lock`]: Backend::lock
+/// [`unlock`]: Backend::unlock
+/// [`relock`]: Backend::relock
pub unsafe trait Backend {
/// The state required by the lock.
type State;
- /// The state required to be kept between lock and unlock.
+ /// The state required to be kept between [`lock`] and [`unlock`].
+ ///
+ /// [`lock`]: Backend::lock
+ /// [`unlock`]: Backend::unlock
type GuardState;
/// Initialises the lock.
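For reference, an implementation of this trait has the shape below; the sketch is modeled on the kernel's mutex backend (`MutexBackend` in `lock/mutex.rs`), with the safety comments abbreviated:

```
unsafe impl super::Backend for MutexBackend {
    type State = bindings::mutex;
    type GuardState = ();

    unsafe fn init(
        ptr: *mut Self::State,
        name: *const core::ffi::c_char,
        key: *mut bindings::lock_class_key,
    ) {
        // SAFETY: The caller passes a valid `ptr`; `name` and `key` remain
        // valid indefinitely.
        unsafe { bindings::__mutex_init(ptr, name, key) }
    }

    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
        // SAFETY: `ptr` was initialised by `init`, per the trait requirements.
        unsafe { bindings::mutex_lock(ptr) };
    }

    unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
        // SAFETY: The caller owns the mutex, per the trait requirements.
        unsafe { bindings::mutex_unlock(ptr) }
    }
}
```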
@@ -139,7 +146,7 @@ pub struct Guard<'a, T: ?Sized, B: Backend> {
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}
impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
- pub(crate) fn do_unlocked(&mut self, cb: impl FnOnce()) {
+ pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
// SAFETY: The caller owns the lock, so it is safe to unlock it.
unsafe { B::unlock(self.lock.state.get(), &self.state) };
@@ -147,7 +154,7 @@ impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
let _relock =
ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) });
- cb();
+ cb()
}
}
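Making `do_unlocked` generic over the closure's return type is what lets the condvar code above recover `schedule_timeout`'s remaining-jiffies result across the unlock/relock round trip; a minimal sketch of the pattern, with `timeout_in_jiffies` assumed in scope:

```
// Drop the lock, run the closure, then relock via the scope guard before
// returning; the closure's result is propagated to the caller.
let remaining = guard.do_unlocked(|| {
    // SAFETY: `schedule_timeout` accepts any timeout value and is called
    // without holding the lock.
    unsafe { bindings::schedule_timeout(timeout_in_jiffies) }
});
```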
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 09276fedc091..30632070ee67 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -4,8 +4,6 @@
//!
//! This module allows Rust code to use the kernel's `struct mutex`.
-use crate::bindings;
-
/// Creates a [`Mutex`] initialiser with the given name and a newly-created lock class.
///
/// It uses the name if one is given, otherwise it generates one based on the file name and line
@@ -17,6 +15,7 @@ macro_rules! new_mutex {
$inner, $crate::optional_name!($($name)?), $crate::static_lock_class!())
};
}
+pub use new_mutex;
/// A mutual exclusion primitive.
///
@@ -35,7 +34,7 @@ macro_rules! new_mutex {
/// contains an inner struct (`Inner`) that is protected by a mutex.
///
/// ```
-/// use kernel::{init::InPlaceInit, init::PinInit, new_mutex, pin_init, sync::Mutex};
+/// use kernel::sync::{new_mutex, Mutex};
///
/// struct Inner {
/// a: u32,
@@ -59,7 +58,7 @@ macro_rules! new_mutex {
/// }
///
/// // Allocate a boxed `Example`.
-/// let e = Box::pin_init(Example::new())?;
+/// let e = Box::pin_init(Example::new(), GFP_KERNEL)?;
/// assert_eq!(e.c, 10);
/// assert_eq!(e.d.lock().a, 20);
/// assert_eq!(e.d.lock().b, 30);
@@ -84,7 +83,7 @@ macro_rules! new_mutex {
/// }
/// ```
///
-/// [`struct mutex`]: ../../../../include/linux/mutex.h
+/// [`struct mutex`]: srctree/include/linux/mutex.h
pub type Mutex<T> = super::Lock<T, MutexBackend>;
/// A kernel `struct mutex` lock backend.
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index 91eb2c9e9123..ea5c5bc1ce12 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -4,8 +4,6 @@
//!
//! This module allows Rust code to use the kernel's `spinlock_t`.
-use crate::bindings;
-
/// Creates a [`SpinLock`] initialiser with the given name and a newly-created lock class.
///
/// It uses the name if one is given, otherwise it generates one based on the file name and line
@@ -17,6 +15,7 @@ macro_rules! new_spinlock {
$inner, $crate::optional_name!($($name)?), $crate::static_lock_class!())
};
}
+pub use new_spinlock;
/// A spinlock.
///
@@ -33,7 +32,7 @@ macro_rules! new_spinlock {
/// contains an inner struct (`Inner`) that is protected by a spinlock.
///
/// ```
-/// use kernel::{init::InPlaceInit, init::PinInit, new_spinlock, pin_init, sync::SpinLock};
+/// use kernel::sync::{new_spinlock, SpinLock};
///
/// struct Inner {
/// a: u32,
@@ -57,7 +56,7 @@ macro_rules! new_spinlock {
/// }
///
/// // Allocate a boxed `Example`.
-/// let e = Box::pin_init(Example::new())?;
+/// let e = Box::pin_init(Example::new(), GFP_KERNEL)?;
/// assert_eq!(e.c, 10);
/// assert_eq!(e.d.lock().a, 20);
/// assert_eq!(e.d.lock().b, 30);
@@ -82,7 +81,7 @@ macro_rules! new_spinlock {
/// }
/// ```
///
-/// [`spinlock_t`]: ../../../../include/linux/spinlock.h
+/// [`spinlock_t`]: srctree/include/linux/spinlock.h
pub type SpinLock<T> = super::Lock<T, SpinLockBackend>;
/// A kernel `spinlock_t` lock backend.
@@ -112,7 +111,7 @@ unsafe impl super::Backend for SpinLockBackend {
unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
// SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
- // caller is the owner of the mutex.
+ // caller is the owner of the spinlock.
unsafe { bindings::spin_unlock(ptr) }
}
}
diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs
index b17ee5cd98f3..babc731bd5f6 100644
--- a/rust/kernel/sync/locked_by.rs
+++ b/rust/kernel/sync/locked_by.rs
@@ -9,14 +9,17 @@ use core::{cell::UnsafeCell, mem::size_of, ptr};
/// Allows access to some data to be serialised by a lock that does not wrap it.
///
/// In most cases, data protected by a lock is wrapped by the appropriate lock type, e.g.,
-/// [`super::Mutex`] or [`super::SpinLock`]. [`LockedBy`] is meant for cases when this is not
-/// possible. For example, if a container has a lock and some data in the contained elements needs
+/// [`Mutex`] or [`SpinLock`]. [`LockedBy`] is meant for cases when this is not possible.
+/// For example, if a container has a lock and some data in the contained elements needs
/// to be protected by the same lock.
///
/// [`LockedBy`] wraps the data in lieu of another locking primitive, and only allows access to it
/// when the caller shows evidence that the 'external' lock is locked. It panics if the evidence
/// refers to the wrong instance of the lock.
///
+/// [`Mutex`]: super::Mutex
+/// [`SpinLock`]: super::SpinLock
+///
/// # Examples
///
/// The following is an example for illustrative purposes: `InnerDirectory::bytes_used` is an