Diffstat (limited to 'rust/kernel/sync')
 rust/kernel/sync/arc.rs            | 108
 rust/kernel/sync/arc/std_vendor.rs |  28
 rust/kernel/sync/condvar.rs        | 174
 rust/kernel/sync/lock.rs           | 191
 rust/kernel/sync/lock/mutex.rs     | 118
 rust/kernel/sync/lock/spinlock.rs  | 117
 rust/kernel/sync/locked_by.rs      | 156
 7 files changed, 886 insertions(+), 6 deletions(-)
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index f2f1c83d72ba..e6d206242465 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -17,17 +17,24 @@
use crate::{
bindings,
- error::Result,
+ error::{self, Error},
+ init::{self, InPlaceInit, Init, PinInit},
+ try_init,
types::{ForeignOwnable, Opaque},
};
use alloc::boxed::Box;
use core::{
+ alloc::AllocError,
+ fmt,
marker::{PhantomData, Unsize},
mem::{ManuallyDrop, MaybeUninit},
ops::{Deref, DerefMut},
pin::Pin,
ptr::NonNull,
};
+use macros::pin_data;
+
+mod std_vendor;
/// A reference-counted pointer to an instance of `T`.
///
@@ -120,6 +127,7 @@ pub struct Arc<T: ?Sized> {
_p: PhantomData<ArcInner<T>>,
}
+#[pin_data]
#[repr(C)]
struct ArcInner<T: ?Sized> {
refcount: Opaque<bindings::refcount_t>,
@@ -149,7 +157,7 @@ unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
impl<T> Arc<T> {
/// Constructs a new reference counted instance of `T`.
- pub fn try_new(contents: T) -> Result<Self> {
+ pub fn try_new(contents: T) -> Result<Self, AllocError> {
// INVARIANT: The refcount is initialised to a non-zero value.
let value = ArcInner {
// SAFETY: There are no safety requirements for this FFI call.
@@ -163,6 +171,28 @@ impl<T> Arc<T> {
// `Arc` object.
Ok(unsafe { Self::from_inner(Box::leak(inner).into()) })
}
+
+ /// Use the given initializer to in-place initialize a `T`.
+ ///
+ /// If `T: !Unpin` it will not be able to move afterwards.
+ #[inline]
+ pub fn pin_init<E>(init: impl PinInit<T, E>) -> error::Result<Self>
+ where
+ Error: From<E>,
+ {
+ UniqueArc::pin_init(init).map(|u| u.into())
+ }
+
+ /// Use the given initializer to in-place initialize a `T`.
+ ///
+ /// This is equivalent to [`pin_init`], since an [`Arc`] is always pinned.
+ #[inline]
+ pub fn init<E>(init: impl Init<T, E>) -> error::Result<Self>
+ where
+ Error: From<E>,
+ {
+ UniqueArc::init(init).map(|u| u.into())
+ }
}
impl<T: ?Sized> Arc<T> {
@@ -469,7 +499,7 @@ pub struct UniqueArc<T: ?Sized> {
impl<T> UniqueArc<T> {
/// Tries to allocate a new [`UniqueArc`] instance.
- pub fn try_new(value: T) -> Result<Self> {
+ pub fn try_new(value: T) -> Result<Self, AllocError> {
Ok(Self {
// INVARIANT: The newly-created object has a ref-count of 1.
inner: Arc::try_new(value)?,
@@ -477,10 +507,17 @@ impl<T> UniqueArc<T> {
}
/// Tries to allocate a new [`UniqueArc`] instance whose contents are not initialised yet.
- pub fn try_new_uninit() -> Result<UniqueArc<MaybeUninit<T>>> {
- Ok(UniqueArc::<MaybeUninit<T>> {
+ pub fn try_new_uninit() -> Result<UniqueArc<MaybeUninit<T>>, AllocError> {
+ // INVARIANT: The refcount is initialised to a non-zero value.
+ let inner = Box::try_init::<AllocError>(try_init!(ArcInner {
+ // SAFETY: There are no safety requirements for this FFI call.
+ refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
+ data <- init::uninit::<T, AllocError>(),
+ }? AllocError))?;
+ Ok(UniqueArc {
// INVARIANT: The newly-created object has a ref-count of 1.
- inner: Arc::try_new(MaybeUninit::uninit())?,
+ // SAFETY: The pointer from the `Box` is valid.
+ inner: unsafe { Arc::from_inner(Box::leak(inner).into()) },
})
}
}
@@ -489,6 +526,17 @@ impl<T> UniqueArc<MaybeUninit<T>> {
/// Converts a `UniqueArc<MaybeUninit<T>>` into a `UniqueArc<T>` by writing a value into it.
pub fn write(mut self, value: T) -> UniqueArc<T> {
self.deref_mut().write(value);
+ // SAFETY: We just wrote the value to be initialized.
+ unsafe { self.assume_init() }
+ }
+
+ /// Unsafely assume that `self` is initialized.
+ ///
+ /// # Safety
+ ///
+ /// The caller guarantees that the value behind this pointer has been initialized. It is
+ /// *immediate* UB to call this when the value is not initialized.
+ pub unsafe fn assume_init(self) -> UniqueArc<T> {
let inner = ManuallyDrop::new(self).inner.ptr;
UniqueArc {
// SAFETY: The new `Arc` is taking over `ptr` from `self.inner` (which won't be
@@ -496,6 +544,30 @@ impl<T> UniqueArc<MaybeUninit<T>> {
inner: unsafe { Arc::from_inner(inner.cast()) },
}
}
+
+ /// Initialize `self` using the given initializer.
+ pub fn init_with<E>(mut self, init: impl Init<T, E>) -> core::result::Result<UniqueArc<T>, E> {
+ // SAFETY: The supplied pointer is valid for initialization.
+ match unsafe { init.__init(self.as_mut_ptr()) } {
+ // SAFETY: Initialization completed successfully.
+ Ok(()) => Ok(unsafe { self.assume_init() }),
+ Err(err) => Err(err),
+ }
+ }
+
+ /// Pin-initialize `self` using the given pin-initializer.
+ pub fn pin_init_with<E>(
+ mut self,
+ init: impl PinInit<T, E>,
+ ) -> core::result::Result<Pin<UniqueArc<T>>, E> {
+ // SAFETY: The supplied pointer is valid for initialization and we will later pin the value
+ // to ensure it does not move.
+ match unsafe { init.__pinned_init(self.as_mut_ptr()) } {
+ // SAFETY: Initialization completed successfully.
+ Ok(()) => Ok(unsafe { self.assume_init() }.into()),
+ Err(err) => Err(err),
+ }
+ }
}
impl<T: ?Sized> From<UniqueArc<T>> for Pin<UniqueArc<T>> {
@@ -522,3 +594,27 @@ impl<T: ?Sized> DerefMut for UniqueArc<T> {
unsafe { &mut self.inner.ptr.as_mut().data }
}
}
+
+impl<T: fmt::Display + ?Sized> fmt::Display for UniqueArc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self.deref(), f)
+ }
+}
+
+impl<T: fmt::Display + ?Sized> fmt::Display for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self.deref(), f)
+ }
+}
+
+impl<T: fmt::Debug + ?Sized> fmt::Debug for UniqueArc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(self.deref(), f)
+ }
+}
+
+impl<T: fmt::Debug + ?Sized> fmt::Debug for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(self.deref(), f)
+ }
+}
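
A usage sketch (editorial, not part of the patch) of the two-phase initialisation flow enabled by the reworked `UniqueArc`: allocate first, write the value once it is known, then share. The function name `two_phase_init` is illustrative.

```rust
use core::alloc::AllocError;
use kernel::sync::{Arc, UniqueArc};

/// Allocates first, writes the value later, then shares the result.
fn two_phase_init(v: u32) -> Result<Arc<u32>, AllocError> {
    // Allocation can fail here, before `v` is even needed.
    let uninit = UniqueArc::<u32>::try_new_uninit()?;
    // `write` consumes the `UniqueArc<MaybeUninit<u32>>` and returns an
    // initialised `UniqueArc<u32>` (internally via `assume_init`).
    let unique = uninit.write(v);
    // A `UniqueArc` holds the only reference, so it converts into a
    // shared `Arc` without any synchronisation.
    Ok(unique.into())
}
```

`Arc::pin_init` and `Arc::init` above build on the same idea, running the initialiser in place before the value is ever shared.
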
diff --git a/rust/kernel/sync/arc/std_vendor.rs b/rust/kernel/sync/arc/std_vendor.rs
new file mode 100644
index 000000000000..a66a0c2831b3
--- /dev/null
+++ b/rust/kernel/sync/arc/std_vendor.rs
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! The contents of this file come from the Rust standard library, hosted in
+//! the <https://github.com/rust-lang/rust> repository, licensed under
+//! "Apache-2.0 OR MIT" and adapted for kernel use. For copyright details,
+//! see <https://github.com/rust-lang/rust/blob/master/COPYRIGHT>.
+
+use crate::sync::{arc::ArcInner, Arc};
+use core::any::Any;
+
+impl Arc<dyn Any + Send + Sync> {
+ /// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
+ pub fn downcast<T>(self) -> core::result::Result<Arc<T>, Self>
+ where
+ T: Any + Send + Sync,
+ {
+ if (*self).is::<T>() {
+ // SAFETY: We have just checked that the type is correct, so we can cast the pointer.
+ unsafe {
+ let ptr = self.ptr.cast::<ArcInner<T>>();
+ core::mem::forget(self);
+ Ok(Arc::from_inner(ptr))
+ }
+ } else {
+ Err(self)
+ }
+ }
+}
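
A brief sketch of how the vendored `downcast` might be used (editorial; `expect_u32` is an illustrative name):

```rust
use core::any::Any;
use kernel::sync::Arc;

/// Recovers a concrete `Arc<u32>` from a type-erased `Arc`, if the
/// erased value really is a `u32`.
fn expect_u32(obj: Arc<dyn Any + Send + Sync>) -> Option<u32> {
    match obj.downcast::<u32>() {
        Ok(v) => Some(*v),
        // On failure the original `Arc` is handed back, so the caller
        // could try another type; here it is simply dropped.
        Err(_original) => None,
    }
}
```
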
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
new file mode 100644
index 000000000000..ed353399c4e5
--- /dev/null
+++ b/rust/kernel/sync/condvar.rs
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A condition variable.
+//!
+//! This module allows Rust code to use the kernel's [`struct wait_queue_head`] as a condition
+//! variable.
+
+use super::{lock::Backend, lock::Guard, LockClassKey};
+use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque};
+use core::marker::PhantomPinned;
+use macros::pin_data;
+
+/// Creates a [`CondVar`] initialiser with the given name and a newly-created lock class.
+#[macro_export]
+macro_rules! new_condvar {
+ ($($name:literal)?) => {
+ $crate::sync::CondVar::new($crate::optional_name!($($name)?), $crate::static_lock_class!())
+ };
+}
+
+/// A condition variable.
+///
+/// Exposes the kernel's [`struct wait_queue_head`] as a condition variable. It allows the caller
+/// to atomically release the given lock and go to sleep; the lock is reacquired when the thread
+/// wakes up. A wake-up happens when another thread notifies the condvar (via
+/// [`CondVar::notify_one`] or [`CondVar::notify_all`]) or when the thread receives a signal. It
+/// may also wake up spuriously.
+///
+/// Instances of [`CondVar`] need a lock class and to be pinned. The recommended way to create such
+/// instances is with the [`pin_init`](crate::pin_init) and [`new_condvar`] macros.
+///
+/// # Examples
+///
+/// The following is an example of using a condvar with a mutex:
+///
+/// ```
+/// use kernel::sync::{CondVar, Mutex};
+/// use kernel::{new_condvar, new_mutex};
+///
+/// #[pin_data]
+/// pub struct Example {
+/// #[pin]
+/// value: Mutex<u32>,
+///
+/// #[pin]
+/// value_changed: CondVar,
+/// }
+///
+/// /// Waits for `e.value` to become `v`.
+/// fn wait_for_value(e: &Example, v: u32) {
+/// let mut guard = e.value.lock();
+/// while *guard != v {
+/// e.value_changed.wait_uninterruptible(&mut guard);
+/// }
+/// }
+///
+/// /// Increments `e.value` and notifies all potential waiters.
+/// fn increment(e: &Example) {
+/// *e.value.lock() += 1;
+/// e.value_changed.notify_all();
+/// }
+///
+/// /// Allocates a new boxed `Example`.
+/// fn new_example() -> Result<Pin<Box<Example>>> {
+/// Box::pin_init(pin_init!(Example {
+/// value <- new_mutex!(0),
+/// value_changed <- new_condvar!(),
+/// }))
+/// }
+/// ```
+///
+/// [`struct wait_queue_head`]: ../../../include/linux/wait.h
+#[pin_data]
+pub struct CondVar {
+ #[pin]
+ pub(crate) wait_list: Opaque<bindings::wait_queue_head>,
+
+ /// A condvar needs to be pinned because it contains a [`struct list_head`] that is
+ /// self-referential, so it cannot be safely moved once it is initialised.
+ #[pin]
+ _pin: PhantomPinned,
+}
+
+// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on any thread.
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl Send for CondVar {}
+
+// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on multiple threads
+// concurrently.
+unsafe impl Sync for CondVar {}
+
+impl CondVar {
+ /// Constructs a new condvar initialiser.
+ #[allow(clippy::new_ret_no_self)]
+ pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
+ pin_init!(Self {
+ _pin: PhantomPinned,
+ // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
+ // static lifetimes so they live indefinitely.
+ wait_list <- Opaque::ffi_init(|slot| unsafe {
+ bindings::__init_waitqueue_head(slot, name.as_char_ptr(), key.as_ptr())
+ }),
+ })
+ }
+
+ fn wait_internal<T: ?Sized, B: Backend>(&self, wait_state: u32, guard: &mut Guard<'_, T, B>) {
+ let wait = Opaque::<bindings::wait_queue_entry>::uninit();
+
+ // SAFETY: `wait` points to valid memory.
+ unsafe { bindings::init_wait(wait.get()) };
+
+ // SAFETY: Both `wait` and `wait_list` point to valid memory.
+ unsafe {
+ bindings::prepare_to_wait_exclusive(self.wait_list.get(), wait.get(), wait_state as _)
+ };
+
+ // SAFETY: No arguments, switches to another thread.
+ guard.do_unlocked(|| unsafe { bindings::schedule() });
+
+ // SAFETY: Both `wait` and `wait_list` point to valid memory.
+ unsafe { bindings::finish_wait(self.wait_list.get(), wait.get()) };
+ }
+
+ /// Releases the lock and waits for a notification in interruptible mode.
+ ///
+ /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the
+ /// thread to sleep, reacquiring the lock on wake up. It wakes up when notified by
+ /// [`CondVar::notify_one`] or [`CondVar::notify_all`], or when the thread receives a signal.
+ /// It may also wake up spuriously.
+ ///
+ /// Returns whether there is a signal pending.
+ #[must_use = "wait returns if a signal is pending, so the caller must check the return value"]
+ pub fn wait<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) -> bool {
+ self.wait_internal(bindings::TASK_INTERRUPTIBLE, guard);
+ crate::current!().signal_pending()
+ }
+
+ /// Releases the lock and waits for a notification in uninterruptible mode.
+ ///
+ /// Similar to [`CondVar::wait`], except that the wait is not interruptible. That is, the
+ /// thread won't wake up due to signals. It may, however, wake up spuriously.
+ pub fn wait_uninterruptible<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) {
+ self.wait_internal(bindings::TASK_UNINTERRUPTIBLE, guard)
+ }
+
+ /// Calls the kernel function to notify the appropriate number of threads with the given flags.
+ fn notify(&self, count: i32, flags: u32) {
+ // SAFETY: `wait_list` points to valid memory.
+ unsafe {
+ bindings::__wake_up(
+ self.wait_list.get(),
+ bindings::TASK_NORMAL,
+ count,
+ flags as _,
+ )
+ };
+ }
+
+ /// Wakes a single waiter up, if any.
+ ///
+ /// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
+ /// completely (as opposed to automatically waking up the next waiter).
+ pub fn notify_one(&self) {
+ self.notify(1, 0);
+ }
+
+ /// Wakes all waiters up, if any.
+ ///
+ /// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
+ /// completely (as opposed to automatically waking up the next waiter).
+ pub fn notify_all(&self) {
+ self.notify(0, 0);
+ }
+}
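
A sketch (editorial) of one way the `#[must_use]` return of `wait` might be propagated to callers; the `EINTR` constant and `Result` alias are assumed from the surrounding `kernel::error` module, which is not shown in this patch:

```rust
use kernel::error::{code::EINTR, Result};
use kernel::sync::{CondVar, Mutex};

/// Waits for `value` to become non-zero; bails out with `EINTR` if a
/// signal arrives while waiting.
fn wait_nonzero(value: &Mutex<u32>, changed: &CondVar) -> Result<u32> {
    let mut guard = value.lock();
    while *guard == 0 {
        // `wait` returns `true` when a signal is pending, which is why
        // it is marked `#[must_use]`.
        if changed.wait(&mut guard) {
            return Err(EINTR);
        }
    }
    Ok(*guard)
}
```
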
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
new file mode 100644
index 000000000000..a2216325632d
--- /dev/null
+++ b/rust/kernel/sync/lock.rs
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic kernel lock and guard.
+//!
+//! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
+//! spinlocks, raw spinlocks) to be provided with minimal effort.
+
+use super::LockClassKey;
+use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard};
+use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned};
+use macros::pin_data;
+
+pub mod mutex;
+pub mod spinlock;
+
+/// The "backend" of a lock.
+///
+/// It is the actual implementation of the lock, without the need to repeat patterns used in all
+/// locks.
+///
+/// # Safety
+///
+/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
+/// is owned, that is, between calls to `lock` and `unlock`.
+/// - Implementers must also ensure that `relock` uses the same locking method as the original
+/// lock operation.
+pub unsafe trait Backend {
+ /// The state required by the lock.
+ type State;
+
+ /// The state required to be kept between lock and unlock.
+ type GuardState;
+
+ /// Initialises the lock.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must be valid for write for the duration of the call, while `name` and `key` must
+ /// remain valid for read indefinitely.
+ unsafe fn init(
+ ptr: *mut Self::State,
+ name: *const core::ffi::c_char,
+ key: *mut bindings::lock_class_key,
+ );
+
+ /// Acquires the lock, making the caller its owner.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that [`Backend::init`] has been previously called.
+ #[must_use]
+ unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;
+
+ /// Releases the lock, giving up its ownership.
+ ///
+ /// # Safety
+ ///
+ /// It must only be called by the current owner of the lock.
+ unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState);
+
+ /// Reacquires the lock, making the caller its owner.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `guard_state` comes from a previous call to [`Backend::lock`] (or
+ /// variant) that has been unlocked with [`Backend::unlock`] and will be relocked now.
+ unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) {
+ // SAFETY: The safety requirements ensure that the lock is initialised.
+ *guard_state = unsafe { Self::lock(ptr) };
+ }
+}
+
+/// A mutual exclusion primitive.
+///
+/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock backend
+/// specified as the generic parameter `B`.
+#[pin_data]
+pub struct Lock<T: ?Sized, B: Backend> {
+ /// The kernel lock object.
+ #[pin]
+ state: Opaque<B::State>,
+
+ /// Some locks are known to be self-referential (e.g., mutexes), while others are architecture
+ /// or config defined (e.g., spinlocks). So we conservatively require them to be pinned in case
+ /// some architecture uses self-references now or in the future.
+ #[pin]
+ _pin: PhantomPinned,
+
+ /// The data protected by the lock.
+ pub(crate) data: UnsafeCell<T>,
+}
+
+// SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can.
+unsafe impl<T: ?Sized + Send, B: Backend> Send for Lock<T, B> {}
+
+// SAFETY: `Lock` serialises the interior mutability it provides, so it is `Sync` as long as the
+// data it protects is `Send`.
+unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}
+
+impl<T, B: Backend> Lock<T, B> {
+ /// Constructs a new lock initialiser.
+ #[allow(clippy::new_ret_no_self)]
+ pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
+ pin_init!(Self {
+ data: UnsafeCell::new(t),
+ _pin: PhantomPinned,
+ // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
+ // static lifetimes so they live indefinitely.
+ state <- Opaque::ffi_init(|slot| unsafe {
+ B::init(slot, name.as_char_ptr(), key.as_ptr())
+ }),
+ })
+ }
+}
+
+impl<T: ?Sized, B: Backend> Lock<T, B> {
+ /// Acquires the lock and gives the caller access to the data protected by it.
+ pub fn lock(&self) -> Guard<'_, T, B> {
+ // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
+ // that `init` was called.
+ let state = unsafe { B::lock(self.state.get()) };
+ // SAFETY: The lock was just acquired.
+ unsafe { Guard::new(self, state) }
+ }
+}
+
+/// A lock guard.
+///
+/// Allows mutual exclusion primitives that implement the `Backend` trait to automatically unlock
+/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
+/// protected by the lock.
+#[must_use = "the lock unlocks immediately when the guard is unused"]
+pub struct Guard<'a, T: ?Sized, B: Backend> {
+ pub(crate) lock: &'a Lock<T, B>,
+ pub(crate) state: B::GuardState,
+ _not_send: PhantomData<*mut ()>,
+}
+
+// SAFETY: `Guard` is sync when the data protected by the lock is also sync.
+unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}
+
+impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
+ pub(crate) fn do_unlocked(&mut self, cb: impl FnOnce()) {
+ // SAFETY: The caller owns the lock, so it is safe to unlock it.
+ unsafe { B::unlock(self.lock.state.get(), &self.state) };
+
+ // SAFETY: The lock was just unlocked above and is being relocked now.
+ let _relock =
+ ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) });
+
+ cb();
+ }
+}
+
+impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
+ unsafe { &*self.lock.data.get() }
+ }
+}
+
+impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
+ unsafe { &mut *self.lock.data.get() }
+ }
+}
+
+impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
+ fn drop(&mut self) {
+ // SAFETY: The caller owns the lock, so it is safe to unlock it.
+ unsafe { B::unlock(self.lock.state.get(), &self.state) };
+ }
+}
+
+impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
+ /// Constructs a new immutable lock guard.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that it owns the lock.
+ pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+ Self {
+ lock,
+ state,
+ _not_send: PhantomData,
+ }
+ }
+}
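
A small sketch (editorial) of the payoff of the `Backend` abstraction: code written once against `Lock<T, B>` works with any backend, assuming the `lock` module is exported from `kernel::sync` as this patch suggests:

```rust
use kernel::sync::lock::{Backend, Lock};

/// Works with any backend, e.g. both `Mutex<u32>` and `SpinLock<u32>`,
/// because `Lock` is generic over `B: Backend` and the guard derefs to
/// the protected data.
fn increment<B: Backend>(counter: &Lock<u32, B>) -> u32 {
    let mut guard = counter.lock();
    *guard += 1;
    *guard
    // The guard is dropped here, which unlocks via `Backend::unlock`.
}
```
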
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
new file mode 100644
index 000000000000..923472f04af4
--- /dev/null
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A kernel mutex.
+//!
+//! This module allows Rust code to use the kernel's `struct mutex`.
+
+use crate::bindings;
+
+/// Creates a [`Mutex`] initialiser with the given name and a newly-created lock class.
+///
+/// It uses the name if one is given, otherwise it generates one based on the file name and line
+/// number.
+#[macro_export]
+macro_rules! new_mutex {
+ ($inner:expr $(, $name:literal)? $(,)?) => {
+ $crate::sync::Mutex::new(
+ $inner, $crate::optional_name!($($name)?), $crate::static_lock_class!())
+ };
+}
+
+/// A mutual exclusion primitive.
+///
+/// Exposes the kernel's [`struct mutex`]. When multiple threads attempt to lock the same mutex,
+/// only one at a time is allowed to progress; the others will block (sleep) until the mutex is
+/// unlocked, at which point another thread will be allowed to wake up and make progress.
+///
+/// Since it may block, [`Mutex`] needs to be used with care in atomic contexts.
+///
+/// Instances of [`Mutex`] need a lock class and to be pinned. The recommended way to create such
+/// instances is with the [`pin_init`](crate::pin_init) and [`new_mutex`] macros.
+///
+/// # Examples
+///
+/// The following example shows how to declare, allocate and initialise a struct (`Example`) that
+/// contains an inner struct (`Inner`) that is protected by a mutex.
+///
+/// ```
+/// use kernel::{init::InPlaceInit, init::PinInit, new_mutex, pin_init, sync::Mutex};
+///
+/// struct Inner {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// #[pin_data]
+/// struct Example {
+/// c: u32,
+/// #[pin]
+/// d: Mutex<Inner>,
+/// }
+///
+/// impl Example {
+/// fn new() -> impl PinInit<Self> {
+/// pin_init!(Self {
+/// c: 10,
+/// d <- new_mutex!(Inner { a: 20, b: 30 }),
+/// })
+/// }
+/// }
+///
+/// // Allocate a boxed `Example`.
+/// let e = Box::pin_init(Example::new())?;
+/// assert_eq!(e.c, 10);
+/// assert_eq!(e.d.lock().a, 20);
+/// assert_eq!(e.d.lock().b, 30);
+/// ```
+///
+/// The following example shows how to use interior mutability to modify the contents of a struct
+/// protected by a mutex despite only having a shared reference:
+///
+/// ```
+/// use kernel::sync::Mutex;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn example(m: &Mutex<Example>) {
+/// let mut guard = m.lock();
+/// guard.a += 10;
+/// guard.b += 20;
+/// }
+/// ```
+///
+/// [`struct mutex`]: ../../../../include/linux/mutex.h
+pub type Mutex<T> = super::Lock<T, MutexBackend>;
+
+/// A kernel `struct mutex` lock backend.
+pub struct MutexBackend;
+
+// SAFETY: The underlying kernel `struct mutex` object ensures mutual exclusion.
+unsafe impl super::Backend for MutexBackend {
+ type State = bindings::mutex;
+ type GuardState = ();
+
+ unsafe fn init(
+ ptr: *mut Self::State,
+ name: *const core::ffi::c_char,
+ key: *mut bindings::lock_class_key,
+ ) {
+ // SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and
+ // `key` are valid for read indefinitely.
+ unsafe { bindings::__mutex_init(ptr, name, key) }
+ }
+
+ unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
+ // SAFETY: The safety requirements of this function ensure that `ptr` points to valid
+ // memory, and that it has been initialised before.
+ unsafe { bindings::mutex_lock(ptr) };
+ }
+
+ unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
+ // SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
+ // caller is the owner of the mutex.
+ unsafe { bindings::mutex_unlock(ptr) };
+ }
+}
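
As the macro definition above shows, `new_mutex!` also accepts an optional name literal, which then labels the lock class instead of the generated file-and-line default. A minimal sketch (editorial), mirroring the doctest style used above:

```rust
use kernel::{init::PinInit, new_mutex, pin_init, sync::Mutex};

#[pin_data]
struct Named {
    #[pin]
    counter: Mutex<u32>,
}

impl Named {
    fn new() -> impl PinInit<Self> {
        pin_init!(Self {
            // The optional literal names the lock class explicitly.
            counter <- new_mutex!(0, "Named::counter"),
        })
    }
}
```
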
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
new file mode 100644
index 000000000000..979b56464a4e
--- /dev/null
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A kernel spinlock.
+//!
+//! This module allows Rust code to use the kernel's `spinlock_t`.
+
+use crate::bindings;
+
+/// Creates a [`SpinLock`] initialiser with the given name and a newly-created lock class.
+///
+/// It uses the name if one is given, otherwise it generates one based on the file name and line
+/// number.
+#[macro_export]
+macro_rules! new_spinlock {
+ ($inner:expr $(, $name:literal)? $(,)?) => {
+ $crate::sync::SpinLock::new(
+ $inner, $crate::optional_name!($($name)?), $crate::static_lock_class!())
+ };
+}
+
+/// A spinlock.
+///
+/// Exposes the kernel's [`spinlock_t`]. When multiple CPUs attempt to lock the same spinlock, only
+/// one at a time is allowed to progress; the others will block (spinning) until the spinlock is
+/// unlocked, at which point another CPU will be allowed to make progress.
+///
+/// Instances of [`SpinLock`] need a lock class and to be pinned. The recommended way to create such
+/// instances is with the [`pin_init`](crate::pin_init) and [`new_spinlock`] macros.
+///
+/// # Examples
+///
+/// The following example shows how to declare, allocate and initialise a struct (`Example`) that
+/// contains an inner struct (`Inner`) that is protected by a spinlock.
+///
+/// ```
+/// use kernel::{init::InPlaceInit, init::PinInit, new_spinlock, pin_init, sync::SpinLock};
+///
+/// struct Inner {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// #[pin_data]
+/// struct Example {
+/// c: u32,
+/// #[pin]
+/// d: SpinLock<Inner>,
+/// }
+///
+/// impl Example {
+/// fn new() -> impl PinInit<Self> {
+/// pin_init!(Self {
+/// c: 10,
+/// d <- new_spinlock!(Inner { a: 20, b: 30 }),
+/// })
+/// }
+/// }
+///
+/// // Allocate a boxed `Example`.
+/// let e = Box::pin_init(Example::new())?;
+/// assert_eq!(e.c, 10);
+/// assert_eq!(e.d.lock().a, 20);
+/// assert_eq!(e.d.lock().b, 30);
+/// ```
+///
+/// The following example shows how to use interior mutability to modify the contents of a struct
+/// protected by a spinlock despite only having a shared reference:
+///
+/// ```
+/// use kernel::sync::SpinLock;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn example(m: &SpinLock<Example>) {
+/// let mut guard = m.lock();
+/// guard.a += 10;
+/// guard.b += 20;
+/// }
+/// ```
+///
+/// [`spinlock_t`]: ../../../../include/linux/spinlock.h
+pub type SpinLock<T> = super::Lock<T, SpinLockBackend>;
+
+/// A kernel `spinlock_t` lock backend.
+pub struct SpinLockBackend;
+
+// SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion. `relock` uses the
+// default implementation that always calls the same locking method.
+unsafe impl super::Backend for SpinLockBackend {
+ type State = bindings::spinlock_t;
+ type GuardState = ();
+
+ unsafe fn init(
+ ptr: *mut Self::State,
+ name: *const core::ffi::c_char,
+ key: *mut bindings::lock_class_key,
+ ) {
+ // SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and
+ // `key` are valid for read indefinitely.
+ unsafe { bindings::__spin_lock_init(ptr, name, key) }
+ }
+
+ unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
+ // SAFETY: The safety requirements of this function ensure that `ptr` points to valid
+ // memory, and that it has been initialised before.
+ unsafe { bindings::spin_lock(ptr) }
+ }
+
+ unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
+ // SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
+ // caller is the owner of the spinlock.
+ unsafe { bindings::spin_unlock(ptr) }
+ }
+}
diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs
new file mode 100644
index 000000000000..b17ee5cd98f3
--- /dev/null
+++ b/rust/kernel/sync/locked_by.rs
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A wrapper for data protected by a lock that does not wrap it.
+
+use super::{lock::Backend, lock::Lock};
+use crate::build_assert;
+use core::{cell::UnsafeCell, mem::size_of, ptr};
+
+/// Allows access to some data to be serialised by a lock that does not wrap it.
+///
+/// In most cases, data protected by a lock is wrapped by the appropriate lock type, e.g.,
+/// [`super::Mutex`] or [`super::SpinLock`]. [`LockedBy`] is meant for cases when this is not
+/// possible. For example, if a container has a lock and some data in the contained elements needs
+/// to be protected by the same lock.
+///
+/// [`LockedBy`] wraps the data in lieu of another locking primitive, and only allows access to it
+/// when the caller shows evidence that the 'external' lock is locked. It panics if the evidence
+/// refers to the wrong instance of the lock.
+///
+/// # Examples
+///
+/// The following is an example for illustrative purposes: `InnerDirectory::bytes_used` is an
+/// aggregate of all `InnerFile::bytes_used` and must be kept consistent, so we wrap `InnerFile` in
+/// a `LockedBy` so that it shares a lock with `InnerDirectory`. This allows us to enforce at
+/// compile-time that access to `InnerFile` is only granted when an `InnerDirectory` is also
+/// locked; we enforce at run time that the right `InnerDirectory` is locked.
+///
+/// ```
+/// use kernel::sync::{LockedBy, Mutex};
+///
+/// struct InnerFile {
+/// bytes_used: u64,
+/// }
+///
+/// struct File {
+/// _ino: u32,
+/// inner: LockedBy<InnerFile, InnerDirectory>,
+/// }
+///
+/// struct InnerDirectory {
+/// /// The sum of the bytes used by all files.
+/// bytes_used: u64,
+/// _files: Vec<File>,
+/// }
+///
+/// struct Directory {
+/// _ino: u32,
+/// inner: Mutex<InnerDirectory>,
+/// }
+///
+/// /// Prints `bytes_used` from both the directory and file.
+/// fn print_bytes_used(dir: &Directory, file: &File) {
+/// let guard = dir.inner.lock();
+/// let inner_file = file.inner.access(&guard);
+/// pr_info!("{} {}", guard.bytes_used, inner_file.bytes_used);
+/// }
+///
+/// /// Increments `bytes_used` for both the directory and file.
+/// fn inc_bytes_used(dir: &Directory, file: &File) {
+/// let mut guard = dir.inner.lock();
+/// guard.bytes_used += 10;
+///
+/// let file_inner = file.inner.access_mut(&mut guard);
+/// file_inner.bytes_used += 10;
+/// }
+///
+/// /// Creates a new file.
+/// fn new_file(ino: u32, dir: &Directory) -> File {
+/// File {
+/// _ino: ino,
+/// inner: LockedBy::new(&dir.inner, InnerFile { bytes_used: 0 }),
+/// }
+/// }
+/// ```
+pub struct LockedBy<T: ?Sized, U: ?Sized> {
+ owner: *const U,
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: `LockedBy` can be transferred across thread boundaries iff the data it protects can.
+unsafe impl<T: ?Sized + Send, U: ?Sized> Send for LockedBy<T, U> {}
+
+// SAFETY: `LockedBy` serialises the interior mutability it provides, so it is `Sync` as long as the
+// data it protects is `Send`.
+unsafe impl<T: ?Sized + Send, U: ?Sized> Sync for LockedBy<T, U> {}
+
+impl<T, U> LockedBy<T, U> {
+ /// Constructs a new instance of [`LockedBy`].
+ ///
+ /// It stores a raw pointer to the owner that is never dereferenced. It is only used to ensure
+ /// that the right owner is being used to access the protected data. If the owner is freed, the
+ /// data becomes inaccessible; if another instance of the owner is allocated *on the same
+ /// memory location*, the data becomes accessible again: none of this affects memory safety
+ /// because in any case at most one thread (or CPU) can access the protected data at a time.
+ pub fn new<B: Backend>(owner: &Lock<U, B>, data: T) -> Self {
+ build_assert!(
+ size_of::<Lock<U, B>>() > 0,
+ "The lock type cannot be a ZST because it may be impossible to distinguish instances"
+ );
+ Self {
+ owner: owner.data.get(),
+ data: UnsafeCell::new(data),
+ }
+ }
+}
+
+impl<T: ?Sized, U> LockedBy<T, U> {
+ /// Returns a reference to the protected data when the caller provides evidence (via a
+ /// reference) that the owner is locked.
+ ///
+ /// `U` cannot be a zero-sized type (ZST) because there are ways to get an `&U` that matches
+ /// the data protected by the lock without actually holding it.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `owner` is different from the data protected by the lock used in
+ /// [`new`](LockedBy::new).
+ pub fn access<'a>(&'a self, owner: &'a U) -> &'a T {
+ build_assert!(
+ size_of::<U>() > 0,
+ "`U` cannot be a ZST because `owner` wouldn't be unique"
+ );
+ if !ptr::eq(owner, self.owner) {
+ panic!("mismatched owners");
+ }
+
+ // SAFETY: `owner` is evidence that the owner is locked.
+ unsafe { &*self.data.get() }
+ }
+
+ /// Returns a mutable reference to the protected data when the caller provides evidence (via a
+ /// mutable owner) that the owner is locked mutably.
+ ///
+ /// `U` cannot be a zero-sized type (ZST) because there are ways to get an `&mut U` that
+ /// matches the data protected by the lock without actually holding it.
+ ///
+ /// Showing a mutable reference to the owner is sufficient because we know no other references
+ /// can exist to it.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `owner` is different from the data protected by the lock used in
+ /// [`new`](LockedBy::new).
+ pub fn access_mut<'a>(&'a self, owner: &'a mut U) -> &'a mut T {
+ build_assert!(
+ size_of::<U>() > 0,
+ "`U` cannot be a ZST because `owner` wouldn't be unique"
+ );
+ if !ptr::eq(owner, self.owner) {
+ panic!("mismatched owners");
+ }
+
+ // SAFETY: `owner` is evidence that there is only one reference to the owner.
+ unsafe { &mut *self.data.get() }
+ }
+}
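
To make the run-time owner check concrete, a self-contained sketch (editorial; `demo` is an illustrative name). It relies only on `access` comparing the given reference against the owner pointer stored by `new`:

```rust
use kernel::sync::{lock::{Backend, Lock}, LockedBy};

/// `access` compares the given `&U` against the pointer captured by
/// `LockedBy::new`, so only a guard of the *same* lock is accepted.
fn demo<B: Backend>(a: &Lock<u32, B>, b: &Lock<u32, B>) {
    let data = LockedBy::new(a, 42u32);

    let guard_a = a.lock();
    // Same owner: the guard derefs to the `u32` inside `a`, matching
    // the stored owner pointer.
    assert_eq!(*data.access(&guard_a), 42);
    drop(guard_a);

    let _guard_b = b.lock();
    // Different owner: this line would panic with "mismatched owners".
    // let _ = data.access(&_guard_b);
}
```
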