diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 79897841a2cc8ad329e5237fa5b9f210cfcab5b7..3c0e0e31df0ec8894dd79ea99585667f6ed545ce 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -96,11 +96,17 @@
 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 				 struct lock_class_key *key, short inner);
 
+static inline void _raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+				       struct lock_class_key *key)
+{
+	__raw_spin_lock_init(lock, name, key, LD_WAIT_SPIN);
+}
+
 # define raw_spin_lock_init(lock)				\
 do {								\
 	static struct lock_class_key __key;			\
 								\
-	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);	\
+	_raw_spin_lock_init((lock), #lock, &__key);		\
 } while (0)
 
 #else
@@ -331,12 +337,17 @@ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
 
 #ifdef CONFIG_DEBUG_SPINLOCK
 
-# define spin_lock_init(lock)					\
-do {								\
-	static struct lock_class_key __key;			\
-								\
-	__raw_spin_lock_init(spinlock_check(lock),		\
-			     #lock, &__key, LD_WAIT_CONFIG);	\
+static inline void __spin_lock_init(spinlock_t *lock, const char *name,
+				    struct lock_class_key *key)
+{
+	__raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
+}
+
+# define spin_lock_init(lock)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__spin_lock_init(lock, #lock, &__key);			\
 } while (0)
 
 #else
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
new file mode 100644
index 0000000000000000000000000000000000000000..66536fe2ba18fe02c6575bc033c424272574c48e
--- /dev/null
+++ b/rust/kernel/sync.rs
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Synchronisation primitives.
+//!
+//! This module contains the kernel APIs related to synchronisation that have been ported or
+//! wrapped for usage by Rust code in the kernel and is shared by all of them.
+//!
+//! # Example
+//!
+//! ```
+//! # use kernel::mutex_init;
+//! # use kernel::sync::Mutex;
+//! # use alloc::boxed::Box;
+//! # use core::pin::Pin;
+//! // SAFETY: `init` is called below.
+//! let mut data = Pin::from(Box::try_new(unsafe { Mutex::new(10) }).unwrap());
+//! mutex_init!(data.as_mut(), "test::data");
+//!
+//! assert_eq!(*data.lock(), 10);
+//! *data.lock() = 20;
+//! assert_eq!(*data.lock(), 20);
+//! ```
+
+use crate::{bindings, str::CStr};
+use core::pin::Pin;
+
+mod arc;
+mod condvar;
+mod guard;
+mod locked_by;
+mod mutex;
+mod nowait;
+mod revocable;
+mod rwsem;
+mod seqlock;
+pub mod smutex;
+mod spinlock;
+
+pub use arc::{Ref, RefBorrow, UniqueRef};
+pub use condvar::CondVar;
+pub use guard::{Guard, Lock, LockFactory, LockInfo, LockIniter, ReadLock, WriteLock};
+pub use locked_by::LockedBy;
+pub use mutex::{Mutex, RevocableMutex, RevocableMutexGuard};
+pub use nowait::{NoWaitLock, NoWaitLockGuard};
+pub use revocable::{Revocable, RevocableGuard};
+pub use rwsem::{RevocableRwSemaphore, RevocableRwSemaphoreGuard, RwSemaphore};
+pub use seqlock::{SeqLock, SeqLockReadGuard};
+pub use spinlock::{RawSpinLock, SpinLock};
+
+/// Safely initialises an object that has an `init` function that takes a name and a lock class as
+/// arguments, examples of these are [`Mutex`] and [`SpinLock`]. Each of them also provides a more
+/// specialised name that uses this macro.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! init_with_lockdep {
+    ($obj:expr, $name:expr) => {{
+        static mut CLASS1: core::mem::MaybeUninit<$crate::bindings::lock_class_key> =
+            core::mem::MaybeUninit::uninit();
+        static mut CLASS2: core::mem::MaybeUninit<$crate::bindings::lock_class_key> =
+            core::mem::MaybeUninit::uninit();
+        let obj = $obj;
+        let name = $crate::c_str!($name);
+        // SAFETY: `CLASS1` and `CLASS2` are never used by Rust code directly; the C portion of the
+        // kernel may change it though.
+        #[allow(unused_unsafe)]
+        unsafe {
+            $crate::sync::NeedsLockClass::init(obj, name, CLASS1.as_mut_ptr(), CLASS2.as_mut_ptr())
+        };
+    }};
+}
+
+/// A trait for types that need a lock class during initialisation.
+///
+/// Implementers of this trait benefit from the [`init_with_lockdep`] macro that generates a new
+/// class for each initialisation call site.
+pub trait NeedsLockClass {
+    /// Initialises the type instance so that it can be safely used.
+    ///
+    /// Callers are encouraged to use the [`init_with_lockdep`] macro as it automatically creates a
+    /// new lock class on each usage.
+    ///
+    /// # Safety
+    ///
+    /// `key1` and `key2` must point to valid memory locations and remain valid until `self` is
+    /// dropped.
+    unsafe fn init(
+        self: Pin<&mut Self>,
+        name: &'static CStr,
+        key1: *mut bindings::lock_class_key,
+        key2: *mut bindings::lock_class_key,
+    );
+}
+
+/// Automatically initialises static instances of synchronisation primitives.
+///
+/// The syntax resembles that of regular static variables, except that the value assigned is that
+/// of the protected type (if one exists). In the examples below, all primitives except for
+/// [`CondVar`] require the inner value to be supplied.
+///
+/// # Examples
+///
+/// ```ignore
+/// # use kernel::{init_static_sync, sync::{CondVar, Mutex, RevocableMutex, SpinLock}};
+/// struct Test {
+///     a: u32,
+///     b: u32,
+/// }
+///
+/// init_static_sync! {
+///     static A: Mutex<Test> = Test { a: 10, b: 20 };
+///
+///     /// Documentation for `B`.
+///     pub static B: Mutex<u32> = 0;
+///
+///     pub(crate) static C: SpinLock<Test> = Test { a: 10, b: 20 };
+///     static D: CondVar;
+///
+///     static E: RevocableMutex<Test> = Test { a: 30, b: 40 };
+/// }
+/// ```
+#[macro_export]
+macro_rules! init_static_sync {
+    ($($(#[$outer:meta])* $v:vis static $id:ident : $t:ty $(= $value:expr)?;)*) => {
+        $(
+            $(#[$outer])*
+            $v static $id: $t = {
+                #[link_section = ".init_array"]
+                #[used]
+                static TMP: extern "C" fn() = {
+                    extern "C" fn constructor() {
+                        // SAFETY: This locally-defined function is only called from a constructor,
+                        // which guarantees that `$id` is not accessible from other threads
+                        // concurrently.
+                        #[allow(clippy::cast_ref_to_mut)]
+                        let mutable = unsafe { &mut *(&$id as *const _ as *mut $t) };
+                        // SAFETY: It's a shared static, so it cannot move.
+                        let pinned = unsafe { core::pin::Pin::new_unchecked(mutable) };
+                        $crate::init_with_lockdep!(pinned, stringify!($id));
+                    }
+                    constructor
+                };
+                $crate::init_static_sync!(@call_new $t, $($value)?)
+            };
+        )*
+    };
+    (@call_new $t:ty, $value:expr) => {{
+        let v = $value;
+        // SAFETY: the initialisation function is called by the constructor above.
+        unsafe { <$t>::new(v) }
+    }};
+    (@call_new $t:ty,) => {
+        // SAFETY: the initialisation function is called by the constructor above.
+        unsafe { <$t>::new() }
+    };
+}
+
+/// Reschedules the caller's task if needed.
+pub fn cond_resched() -> bool {
+    // SAFETY: No arguments, reschedules `current` if needed.
+ unsafe { bindings::cond_resched() != 0 } +} diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs new file mode 100644 index 0000000000000000000000000000000000000000..056d2bae632ad8087290a39b78ceee1ad700e8a5 --- /dev/null +++ b/rust/kernel/sync/arc.rs @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! A reference-counted pointer. +//! +//! This module implements a way for users to create reference-counted objects and pointers to +//! them. Such a pointer automatically increments and decrements the count, and drops the +//! underlying object when it reaches zero. It is also safe to use concurrently from multiple +//! threads. +//! +//! It is different from the standard library's [`Arc`] in a few ways: +//! 1. It is backed by the kernel's `refcount_t` type. +//! 2. It does not support weak references, which allows it to be half the size. +//! 3. It saturates the reference count instead of aborting when it goes over a threshold. +//! 4. It does not provide a `get_mut` method, so the ref counted object is pinned. +//! +//! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html + +use crate::{bindings, error::code::*, Error, Opaque, Result}; +use alloc::{ + alloc::{alloc, dealloc}, + vec::Vec, +}; +use core::{ + alloc::Layout, + convert::{AsRef, TryFrom}, + marker::{PhantomData, Unsize}, + mem::{ManuallyDrop, MaybeUninit}, + ops::{Deref, DerefMut}, + pin::Pin, + ptr::{self, NonNull}, +}; + +/// A reference-counted pointer to an instance of `T`. +/// +/// The reference count is incremented when new instances of [`Ref`] are created, and decremented +/// when they are dropped. When the count reaches zero, the underlying `T` is also dropped. +/// +/// # Invariants +/// +/// The reference count on an instance of [`Ref`] is always non-zero. +/// The object pointed to by [`Ref`] is always pinned. +pub struct Ref { + ptr: NonNull>, + _p: PhantomData>, +} + +#[repr(C)] +struct RefInner { + refcount: Opaque, + data: T, +} + +// This is to allow [`Ref`] (and variants) to be used as the type of `self`. +impl core::ops::Receiver for Ref {} + +// This is to allow [`RefBorrow`] (and variants) to be used as the type of `self`. +impl core::ops::Receiver for RefBorrow<'_, T> {} + +// This is to allow coercion from `Ref` to `Ref` if `T` can be converted to the +// dynamically-sized type (DST) `U`. +impl, U: ?Sized> core::ops::CoerceUnsized> for Ref {} + +// This is to allow `Ref` to be dispatched on when `Ref` can be coerced into `Ref`. +impl, U: ?Sized> core::ops::DispatchFromDyn> for Ref {} + +// SAFETY: It is safe to send `Ref` to another thread when the underlying `T` is `Sync` because +// it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs +// `T` to be `Send` because any thread that has a `Ref` may ultimately access `T` directly, for +// example, when the reference count reaches zero and `T` is dropped. +unsafe impl Send for Ref {} + +// SAFETY: It is safe to send `&Ref` to another thread when the underlying `T` is `Sync` for +// the same reason as above. `T` needs to be `Send` as well because a thread can clone a `&Ref` +// into a `Ref`, which may lead to `T` being accessed by the same reasoning as above. +unsafe impl Sync for Ref {} + +impl Ref { + /// Constructs a new reference counted instance of `T`. + pub fn try_new(contents: T) -> Result { + let layout = Layout::new::>(); + // SAFETY: The layout size is guaranteed to be non-zero because `RefInner` contains the + // reference count. 
+ let inner = NonNull::new(unsafe { alloc(layout) }) + .ok_or(ENOMEM)? + .cast::>(); + + // INVARIANT: The refcount is initialised to a non-zero value. + let value = RefInner { + // SAFETY: Just an FFI call that returns a `refcount_t` initialised to 1. + refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }), + data: contents, + }; + // SAFETY: `inner` is writable and properly aligned. + unsafe { inner.as_ptr().write(value) }; + + // SAFETY: We just created `inner` with a reference count of 1, which is owned by the new + // `Ref` object. + Ok(unsafe { Self::from_inner(inner) }) + } + + /// Deconstructs a [`Ref`] object into a `usize`. + /// + /// It can be reconstructed once via [`Ref::from_usize`]. + pub fn into_usize(obj: Self) -> usize { + ManuallyDrop::new(obj).ptr.as_ptr() as _ + } + + /// Borrows a [`Ref`] instance previously deconstructed via [`Ref::into_usize`]. + /// + /// # Safety + /// + /// `encoded` must have been returned by a previous call to [`Ref::into_usize`]. Additionally, + /// [`Ref::from_usize`] can only be called after *all* instances of [`RefBorrow`] have been + /// dropped. + pub unsafe fn borrow_usize<'a>(encoded: usize) -> RefBorrow<'a, T> { + // SAFETY: By the safety requirement of this function, we know that `encoded` came from + // a previous call to `Ref::into_usize`. + let inner = NonNull::new(encoded as *mut RefInner).unwrap(); + + // SAFETY: The safety requirements ensure that the object remains alive for the lifetime of + // the returned value. There is no way to create mutable references to the object. + unsafe { RefBorrow::new(inner) } + } + + /// Recreates a [`Ref`] instance previously deconstructed via [`Ref::into_usize`]. + /// + /// # Safety + /// + /// `encoded` must have been returned by a previous call to [`Ref::into_usize`]. Additionally, + /// it can only be called once for each previous call to [`Ref::into_usize`]. + pub unsafe fn from_usize(encoded: usize) -> Self { + // SAFETY: By the safety invariants we know that `encoded` came from `Ref::into_usize`, so + // the reference count held then will be owned by the new `Ref` object. + unsafe { Self::from_inner(NonNull::new(encoded as _).unwrap()) } + } +} + +impl Ref { + /// Constructs a new [`Ref`] from an existing [`RefInner`]. + /// + /// # Safety + /// + /// The caller must ensure that `inner` points to a valid location and has a non-zero reference + /// count, one of which will be owned by the new [`Ref`] instance. + unsafe fn from_inner(inner: NonNull>) -> Self { + // INVARIANT: By the safety requirements, the invariants hold. + Ref { + ptr: inner, + _p: PhantomData, + } + } + + /// Determines if two reference-counted pointers point to the same underlying instance of `T`. + pub fn ptr_eq(a: &Self, b: &Self) -> bool { + ptr::eq(a.ptr.as_ptr(), b.ptr.as_ptr()) + } + + /// Deconstructs a [`Ref`] object into a raw pointer. + /// + /// It can be reconstructed once via [`Ref::from_raw`]. + pub fn into_raw(obj: Self) -> *const T { + let ret = &*obj as *const T; + core::mem::forget(obj); + ret + } + + /// Recreates a [`Ref`] instance previously deconstructed via [`Ref::into_raw`]. + /// + /// This code relies on the `repr(C)` layout of structs as described in + /// . + /// + /// # Safety + /// + /// `ptr` must have been returned by a previous call to [`Ref::into_raw`]. Additionally, it + /// can only be called once for each previous call to [`Ref::into_raw`]. + pub unsafe fn from_raw(ptr: *const T) -> Self { + // SAFETY: The safety requirement ensures that the pointer is valid. 
+ let align = core::mem::align_of_val(unsafe { &*ptr }); + let offset = Layout::new::>() + .align_to(align) + .unwrap() + .pad_to_align() + .size(); + // SAFETY: The pointer is in bounds because by the safety requirements `ptr` came from + // `Ref::into_raw`, so it is a pointer `offset` bytes from the beginning of the allocation. + let data = unsafe { (ptr as *const u8).sub(offset) }; + let metadata = ptr::metadata(ptr as *const RefInner); + let ptr = ptr::from_raw_parts_mut(data as _, metadata); + // SAFETY: By the safety requirements we know that `ptr` came from `Ref::into_raw`, so the + // reference count held then will be owned by the new `Ref` object. + unsafe { Self::from_inner(NonNull::new(ptr).unwrap()) } + } + + /// Returns a [`RefBorrow`] from the given [`Ref`]. + /// + /// This is useful when the argument of a function call is a [`RefBorrow`] (e.g., in a method + /// receiver), but we have a [`Ref`] instead. Getting a [`RefBorrow`] is free when optimised. + #[inline] + pub fn as_ref_borrow(&self) -> RefBorrow<'_, T> { + // SAFETY: The constraint that lifetime of the shared reference must outlive that of + // the returned `RefBorrow` ensures that the object remains alive. + unsafe { RefBorrow::new(self.ptr) } + } +} + +impl Deref for Ref { + type Target = T; + + fn deref(&self) -> &Self::Target { + // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is + // safe to dereference it. + unsafe { &self.ptr.as_ref().data } + } +} + +impl Clone for Ref { + fn clone(&self) -> Self { + // INVARIANT: C `refcount_inc` saturates the refcount, so it cannot overflow to zero. + // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is + // safe to increment the refcount. + unsafe { bindings::refcount_inc(self.ptr.as_ref().refcount.get()) }; + + // SAFETY: We just incremented the refcount. This increment is now owned by the new `Ref`. + unsafe { Self::from_inner(self.ptr) } + } +} + +impl AsRef for Ref { + fn as_ref(&self) -> &T { + // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is + // safe to dereference it. + unsafe { &self.ptr.as_ref().data } + } +} + +impl Drop for Ref { + fn drop(&mut self) { + // SAFETY: By the type invariant, there is necessarily a reference to the object. We cannot + // touch `refcount` after it's decremented to a non-zero value because another thread/CPU + // may concurrently decrement it to zero and free it. It is ok to have a raw pointer to + // freed/invalid memory as long as it is never dereferenced. + let refcount = unsafe { self.ptr.as_ref() }.refcount.get(); + + // INVARIANT: If the refcount reaches zero, there are no other instances of `Ref`, and + // this instance is being dropped, so the broken invariant is not observable. + // SAFETY: Also by the type invariant, we are allowed to decrement the refcount. + let is_zero = unsafe { bindings::refcount_dec_and_test(refcount) }; + if is_zero { + // The count reached zero, we must free the memory. + + // SAFETY: This thread holds the only remaining reference to `self`, so it is safe to + // get a mutable reference to it. + let inner = unsafe { self.ptr.as_mut() }; + let layout = Layout::for_value(inner); + // SAFETY: The value stored in inner is valid. + unsafe { core::ptr::drop_in_place(inner) }; + // SAFETY: The pointer was initialised from the result of a call to `alloc`. 
+ unsafe { dealloc(self.ptr.cast().as_ptr(), layout) }; + } + } +} + +impl TryFrom> for Ref<[T]> { + type Error = Error; + + fn try_from(mut v: Vec) -> Result { + let value_layout = Layout::array::(v.len())?; + let layout = Layout::new::>() + .extend(value_layout)? + .0 + .pad_to_align(); + // SAFETY: The layout size is guaranteed to be non-zero because `RefInner` contains the + // reference count. + let ptr = NonNull::new(unsafe { alloc(layout) }).ok_or(ENOMEM)?; + let inner = + core::ptr::slice_from_raw_parts_mut(ptr.as_ptr() as _, v.len()) as *mut RefInner<[T]>; + + // SAFETY: Just an FFI call that returns a `refcount_t` initialised to 1. + let count = Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }); + // SAFETY: `inner.refcount` is writable and properly aligned. + unsafe { core::ptr::addr_of_mut!((*inner).refcount).write(count) }; + // SAFETY: The contents of `v` as readable and properly aligned; `inner.data` is writable + // and properly aligned. There is no overlap between the two because `inner` is a new + // allocation. + unsafe { + core::ptr::copy_nonoverlapping( + v.as_ptr(), + core::ptr::addr_of_mut!((*inner).data) as *mut [T] as *mut T, + v.len(), + ) + }; + // SAFETY: We're setting the new length to zero, so it is <= to capacity, and old_len..0 is + // an empty range (so satisfies vacuously the requirement of being initialised). + unsafe { v.set_len(0) }; + // SAFETY: We just created `inner` with a reference count of 1, which is owned by the new + // `Ref` object. + Ok(unsafe { Self::from_inner(NonNull::new(inner).unwrap()) }) + } +} + +impl From> for Ref { + fn from(item: UniqueRef) -> Self { + item.inner + } +} + +impl From> for Pin> { + fn from(obj: UniqueRef) -> Self { + // SAFETY: It is not possible to move/replace `T` inside a `Pin>` (unless `T` + // is `Unpin`), so it is ok to convert it to `Pin>`. + unsafe { Pin::new_unchecked(obj) } + } +} + +impl From>> for Ref { + fn from(item: Pin>) -> Self { + // SAFETY: The type invariants of `Ref` guarantee that the data is pinned. + unsafe { Pin::into_inner_unchecked(item).inner } + } +} + +/// A borrowed [`Ref`] with manually-managed lifetime. +/// +/// # Invariants +/// +/// There are no mutable references to the underlying [`Ref`], and it remains valid for the lifetime +/// of the [`RefBorrow`] instance. +pub struct RefBorrow<'a, T: ?Sized + 'a> { + inner: NonNull>, + _p: PhantomData<&'a ()>, +} + +impl Clone for RefBorrow<'_, T> { + fn clone(&self) -> Self { + *self + } +} + +impl Copy for RefBorrow<'_, T> {} + +impl RefBorrow<'_, T> { + /// Creates a new [`RefBorrow`] instance. + /// + /// # Safety + /// + /// Callers must ensure the following for the lifetime of the returned [`RefBorrow`] instance: + /// 1. That `obj` remains valid; + /// 2. That no mutable references to `obj` are created. + unsafe fn new(inner: NonNull>) -> Self { + // INVARIANT: The safety requirements guarantee the invariants. + Self { + inner, + _p: PhantomData, + } + } +} + +impl From> for Ref { + fn from(b: RefBorrow<'_, T>) -> Self { + // SAFETY: The existence of `b` guarantees that the refcount is non-zero. `ManuallyDrop` + // guarantees that `drop` isn't called, so it's ok that the temporary `Ref` doesn't own the + // increment. 
+ ManuallyDrop::new(unsafe { Ref::from_inner(b.inner) }) + .deref() + .clone() + } +} + +impl Deref for RefBorrow<'_, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + // SAFETY: By the type invariant, the underlying object is still alive with no mutable + // references to it, so it is safe to create a shared reference. + unsafe { &self.inner.as_ref().data } + } +} + +/// A refcounted object that is known to have a refcount of 1. +/// +/// It is mutable and can be converted to a [`Ref`] so that it can be shared. +/// +/// # Invariants +/// +/// `inner` always has a reference count of 1. +/// +/// # Examples +/// +/// In the following example, we make changes to the inner object before turning it into a +/// `Ref` object (after which point, it cannot be mutated directly). Note that `x.into()` +/// cannot fail. +/// +/// ``` +/// use kernel::sync::{Ref, UniqueRef}; +/// +/// struct Example { +/// a: u32, +/// b: u32, +/// } +/// +/// fn test() -> Result> { +/// let mut x = UniqueRef::try_new(Example { a: 10, b: 20 })?; +/// x.a += 1; +/// x.b += 1; +/// Ok(x.into()) +/// } +/// +/// # test(); +/// ``` +/// +/// In the following example we first allocate memory for a ref-counted `Example` but we don't +/// initialise it on allocation. We do initialise it later with a call to [`UniqueRef::write`], +/// followed by a conversion to `Ref`. This is particularly useful when allocation happens +/// in one context (e.g., sleepable) and initialisation in another (e.g., atomic): +/// +/// ``` +/// use kernel::sync::{Ref, UniqueRef}; +/// +/// struct Example { +/// a: u32, +/// b: u32, +/// } +/// +/// fn test() -> Result> { +/// let x = UniqueRef::try_new_uninit()?; +/// Ok(x.write(Example { a: 10, b: 20 }).into()) +/// } +/// +/// # test(); +/// ``` +/// +/// In the last example below, the caller gets a pinned instance of `Example` while converting to +/// `Ref`; this is useful in scenarios where one needs a pinned reference during +/// initialisation, for example, when initialising fields that are wrapped in locks. +/// +/// ``` +/// use kernel::sync::{Ref, UniqueRef}; +/// +/// struct Example { +/// a: u32, +/// b: u32, +/// } +/// +/// fn test() -> Result> { +/// let mut pinned = Pin::from(UniqueRef::try_new(Example { a: 10, b: 20 })?); +/// // We can modify `pinned` because it is `Unpin`. +/// pinned.as_mut().a += 1; +/// Ok(pinned.into()) +/// } +/// +/// # test(); +/// ``` +pub struct UniqueRef { + inner: Ref, +} + +impl UniqueRef { + /// Tries to allocate a new [`UniqueRef`] instance. + pub fn try_new(value: T) -> Result { + Ok(Self { + // INVARIANT: The newly-created object has a ref-count of 1. + inner: Ref::try_new(value)?, + }) + } + + /// Tries to allocate a new [`UniqueRef`] instance whose contents are not initialised yet. + pub fn try_new_uninit() -> Result>> { + Ok(UniqueRef::> { + // INVARIANT: The newly-created object has a ref-count of 1. + inner: Ref::try_new(MaybeUninit::uninit())?, + }) + } +} + +impl UniqueRef> { + /// Converts a `UniqueRef>` into a `UniqueRef` by writing a value into it. + pub fn write(mut self, value: T) -> UniqueRef { + self.deref_mut().write(value); + let inner = ManuallyDrop::new(self).inner.ptr; + UniqueRef { + // SAFETY: The new `Ref` is taking over `ptr` from `self.inner` (which won't be + // dropped). The types are compatible because `MaybeUninit` is compatible with `T`. 
+ inner: unsafe { Ref::from_inner(inner.cast()) }, + } + } +} + +impl Deref for UniqueRef { + type Target = T; + + fn deref(&self) -> &Self::Target { + self.inner.deref() + } +} + +impl DerefMut for UniqueRef { + fn deref_mut(&mut self) -> &mut Self::Target { + // SAFETY: By the `Ref` type invariant, there is necessarily a reference to the object, so + // it is safe to dereference it. Additionally, we know there is only one reference when + // it's inside a `UniqueRef`, so it is safe to get a mutable reference. + unsafe { &mut self.inner.ptr.as_mut().data } + } +} diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs new file mode 100644 index 0000000000000000000000000000000000000000..7f8aa1c55a19251cf0d94fde03674829b611c51e --- /dev/null +++ b/rust/kernel/sync/condvar.rs @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! A condition variable. +//! +//! This module allows Rust code to use the kernel's [`struct wait_queue_head`] as a condition +//! variable. + +use super::{Guard, Lock, LockInfo, NeedsLockClass}; +use crate::{bindings, str::CStr, task::Task, Opaque}; +use core::{marker::PhantomPinned, pin::Pin}; + +/// Safely initialises a [`CondVar`] with the given name, generating a new lock class. +#[macro_export] +macro_rules! condvar_init { + ($condvar:expr, $name:literal) => { + $crate::init_with_lockdep!($condvar, $name) + }; +} + +// TODO: `bindgen` is not generating this constant. Figure out why. +const POLLFREE: u32 = 0x4000; + +/// Exposes the kernel's [`struct wait_queue_head`] as a condition variable. It allows the caller to +/// atomically release the given lock and go to sleep. It reacquires the lock when it wakes up. And +/// it wakes up when notified by another thread (via [`CondVar::notify_one`] or +/// [`CondVar::notify_all`]) or because the thread received a signal. +/// +/// [`struct wait_queue_head`]: ../../../include/linux/wait.h +pub struct CondVar { + pub(crate) wait_list: Opaque, + + /// A condvar needs to be pinned because it contains a [`struct list_head`] that is + /// self-referential, so it cannot be safely moved once it is initialised. + _pin: PhantomPinned, +} + +// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on any thread. +#[allow(clippy::non_send_fields_in_send_ty)] +unsafe impl Send for CondVar {} + +// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on multiple threads +// concurrently. +unsafe impl Sync for CondVar {} + +impl CondVar { + /// Constructs a new conditional variable. + /// + /// # Safety + /// + /// The caller must call `CondVar::init` before using the conditional variable. + pub const unsafe fn new() -> Self { + Self { + wait_list: Opaque::uninit(), + _pin: PhantomPinned, + } + } + + /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the + /// thread to sleep. It wakes up when notified by [`CondVar::notify_one`] or + /// [`CondVar::notify_all`], or when the thread receives a signal. + /// + /// Returns whether there is a signal pending. + #[must_use = "wait returns if a signal is pending, so the caller must check the return value"] + pub fn wait, I: LockInfo>(&self, guard: &mut Guard<'_, L, I>) -> bool { + let lock = guard.lock; + let wait = Opaque::::uninit(); + + // SAFETY: `wait` points to valid memory. + unsafe { bindings::init_wait(wait.get()) }; + + // SAFETY: Both `wait` and `wait_list` point to valid memory. 
+ unsafe { + bindings::prepare_to_wait_exclusive( + self.wait_list.get(), + wait.get(), + bindings::TASK_INTERRUPTIBLE as _, + ) + }; + + // SAFETY: The guard is evidence that the caller owns the lock. + unsafe { lock.unlock(&mut guard.context) }; + + // SAFETY: No arguments, switches to another thread. + unsafe { bindings::schedule() }; + + guard.context = lock.lock_noguard(); + + // SAFETY: Both `wait` and `wait_list` point to valid memory. + unsafe { bindings::finish_wait(self.wait_list.get(), wait.get()) }; + + Task::current().signal_pending() + } + + /// Calls the kernel function to notify the appropriate number of threads with the given flags. + fn notify(&self, count: i32, flags: u32) { + // SAFETY: `wait_list` points to valid memory. + unsafe { + bindings::__wake_up( + self.wait_list.get(), + bindings::TASK_NORMAL, + count, + flags as _, + ) + }; + } + + /// Wakes a single waiter up, if any. This is not 'sticky' in the sense that if no thread is + /// waiting, the notification is lost completely (as opposed to automatically waking up the + /// next waiter). + pub fn notify_one(&self) { + self.notify(1, 0); + } + + /// Wakes all waiters up, if any. This is not 'sticky' in the sense that if no thread is + /// waiting, the notification is lost completely (as opposed to automatically waking up the + /// next waiter). + pub fn notify_all(&self) { + self.notify(0, 0); + } + + /// Wakes all waiters up. If they were added by `epoll`, they are also removed from the list of + /// waiters. This is useful when cleaning up a condition variable that may be waited on by + /// threads that use `epoll`. + pub fn free_waiters(&self) { + self.notify(1, bindings::POLLHUP | POLLFREE); + } +} + +impl NeedsLockClass for CondVar { + unsafe fn init( + self: Pin<&mut Self>, + name: &'static CStr, + key: *mut bindings::lock_class_key, + _: *mut bindings::lock_class_key, + ) { + unsafe { bindings::__init_waitqueue_head(self.wait_list.get(), name.as_char_ptr(), key) }; + } +} diff --git a/rust/kernel/sync/guard.rs b/rust/kernel/sync/guard.rs new file mode 100644 index 0000000000000000000000000000000000000000..b825e0cf70b004a75e4201dff715ece079be8f3a --- /dev/null +++ b/rust/kernel/sync/guard.rs @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! A generic lock guard and trait. +//! +//! This module contains a lock guard that can be used with any locking primitive that implements +//! the ([`Lock`]) trait. It also contains the definition of the trait, which can be leveraged by +//! other constructs to work on generic locking primitives. + +use super::NeedsLockClass; +use crate::{bindings, str::CStr, Bool, False, True}; +use core::pin::Pin; + +/// Allows mutual exclusion primitives that implement the [`Lock`] trait to automatically unlock +/// when a guard goes out of scope. It also provides a safe and convenient way to access the data +/// protected by the lock. +#[must_use = "the lock unlocks immediately when the guard is unused"] +pub struct Guard<'a, L: Lock + ?Sized, I: LockInfo = WriteLock> { + pub(crate) lock: &'a L, + pub(crate) context: L::GuardContext, +} + +// SAFETY: `Guard` is sync when the data protected by the lock is also sync. This is more +// conservative than the default compiler implementation; more details can be found on +// https://github.com/rust-lang/rust/issues/41622 -- it refers to `MutexGuard` from the standard +// library. 
+unsafe impl Sync for Guard<'_, L, I> +where + L: Lock + ?Sized, + L::Inner: Sync, + I: LockInfo, +{ +} + +impl + ?Sized, I: LockInfo> core::ops::Deref for Guard<'_, L, I> { + type Target = L::Inner; + + fn deref(&self) -> &Self::Target { + // SAFETY: The caller owns the lock, so it is safe to deref the protected data. + unsafe { &*self.lock.locked_data().get() } + } +} + +impl + ?Sized, I: LockInfo> core::ops::DerefMut for Guard<'_, L, I> { + fn deref_mut(&mut self) -> &mut Self::Target { + // SAFETY: The caller owns the lock, so it is safe to deref the protected data. + unsafe { &mut *self.lock.locked_data().get() } + } +} + +impl + ?Sized, I: LockInfo> Drop for Guard<'_, L, I> { + fn drop(&mut self) { + // SAFETY: The caller owns the lock, so it is safe to unlock it. + unsafe { self.lock.unlock(&mut self.context) }; + } +} + +impl<'a, L: Lock + ?Sized, I: LockInfo> Guard<'a, L, I> { + /// Constructs a new immutable lock guard. + /// + /// # Safety + /// + /// The caller must ensure that it owns the lock. + pub(crate) unsafe fn new(lock: &'a L, context: L::GuardContext) -> Self { + Self { lock, context } + } +} + +/// Specifies properties of a lock. +pub trait LockInfo { + /// Determines if the data protected by a lock is writable. + type Writable: Bool; +} + +/// A marker for locks that only allow reading. +pub struct ReadLock; +impl LockInfo for ReadLock { + type Writable = False; +} + +/// A marker for locks that allow reading and writing. +pub struct WriteLock; +impl LockInfo for WriteLock { + type Writable = True; +} + +/// A generic mutual exclusion primitive. +/// +/// [`Guard`] is written such that any mutual exclusion primitive that can implement this trait can +/// also benefit from having an automatic way to unlock itself. +/// +/// # Safety +/// +/// - Implementers of this trait with the [`WriteLock`] marker must ensure that only one thread/CPU +/// may access the protected data once the lock is held, that is, between calls to `lock_noguard` +/// and `unlock`. +/// - Implementers of all other markers must ensure that a mutable reference to the protected data +/// is not active in any thread/CPU because at least one shared reference is active between calls +/// to `lock_noguard` and `unlock`. +pub unsafe trait Lock { + /// The type of the data protected by the lock. + type Inner: ?Sized; + + /// The type of context, if any, that needs to be stored in the guard. + type GuardContext; + + /// Acquires the lock, making the caller its owner. + #[must_use] + fn lock_noguard(&self) -> Self::GuardContext; + + /// Reacquires the lock, making the caller its owner. + /// + /// The guard context before the last unlock is passed in. + /// + /// Locks that don't require this state on relock can simply use the default implementation + /// that calls [`Lock::lock_noguard`]. + fn relock(&self, ctx: &mut Self::GuardContext) { + *ctx = self.lock_noguard(); + } + + /// Releases the lock, giving up ownership of the lock. + /// + /// # Safety + /// + /// It must only be called by the current owner of the lock. + unsafe fn unlock(&self, context: &mut Self::GuardContext); + + /// Returns the data protected by the lock. + fn locked_data(&self) -> &core::cell::UnsafeCell; +} + +/// A creator of instances of a mutual exclusion (lock) primitive. +pub trait LockFactory { + /// The parametrised type of the mutual exclusion primitive that can be created by this factory. + type LockedType; + + /// Constructs a new instance of the mutual exclusion primitive. 
+ /// + /// # Safety + /// + /// The caller must call [`LockIniter::init_lock`] before using the lock. + unsafe fn new_lock(data: T) -> Self::LockedType; +} + +/// A lock that can be initialised with a single lock class key. +pub trait LockIniter { + /// Initialises the lock instance so that it can be safely used. + /// + /// # Safety + /// + /// `key` must point to a valid memory location that will remain valid until the lock is + /// dropped. + unsafe fn init_lock( + self: Pin<&mut Self>, + name: &'static CStr, + key: *mut bindings::lock_class_key, + ); +} + +impl NeedsLockClass for L { + unsafe fn init( + self: Pin<&mut Self>, + name: &'static CStr, + key: *mut bindings::lock_class_key, + _: *mut bindings::lock_class_key, + ) { + // SAFETY: The safety requirements of this function satisfy those of `init_lock`. + unsafe { self.init_lock(name, key) }; + } +} diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs new file mode 100644 index 0000000000000000000000000000000000000000..334935fb1e37c77f91783adab9f3aeed24828c9c --- /dev/null +++ b/rust/kernel/sync/locked_by.rs @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! A wrapper for data protected by a lock that does not wrap it. + +use super::{Guard, Lock}; +use core::{cell::UnsafeCell, ops::Deref, ptr}; + +/// Allows access to some data to be serialised by a lock that does not wrap it. +/// +/// In most cases, data protected by a lock is wrapped by the appropriate lock type, e.g., +/// [`super::Mutex`] or [`super::SpinLock`]. [`LockedBy`] is meant for cases when this is not +/// possible. For example, if a container has a lock and some data in the contained elements needs +/// to be protected by the same lock. +/// +/// [`LockedBy`] wraps the data in lieu of another locking primitive, and only allows access to it +/// when the caller shows evidence that 'external' lock is locked. +/// +/// # Example +/// +/// The following is an example for illustrative purposes: `InnerDirectory::bytes_used` is an +/// aggregate of all `InnerFile::bytes_used` and must be kept consistent; so we wrap `InnerFile` in +/// a `LockedBy` so that it shares a lock with `InnerDirectory`. This allows us to enforce at +/// compile-time that access to `InnerFile` is only granted when an `InnerDirectory` is also +/// locked; we enforce at run time that the right `InnerDirectory` is locked. +/// +/// ``` +/// use kernel::sync::{LockedBy, Mutex}; +/// +/// struct InnerFile { +/// bytes_used: u64, +/// } +/// +/// struct File { +/// name: String, +/// inner: LockedBy>, +/// } +/// +/// struct InnerDirectory { +/// /// The sum of the bytes used by all files. +/// bytes_used: u64, +/// files: Vec, +/// } +/// +/// struct Directory { +/// name: String, +/// inner: Mutex, +/// } +/// ``` +pub struct LockedBy { + owner: *const L::Inner, + data: UnsafeCell, +} + +// SAFETY: `LockedBy` can be transferred across thread boundaries iff the data it protects can. +unsafe impl Send for LockedBy {} + +// SAFETY: `LockedBy` serialises the interior mutability it provides, so it is `Sync` as long as the +// data it protects is `Send`. +unsafe impl Sync for LockedBy {} + +impl LockedBy { + /// Constructs a new instance of [`LockedBy`]. + /// + /// It stores a raw pointer to the owner that is never dereferenced. It is only used to ensure + /// that the right owner is being used to access the protected data. 
If the owner is freed, the + /// data becomes inaccessible; if another instance of the owner is allocated *on the same + /// memory location*, the data becomes accessible again: none of this affects memory safety + /// because in any case at most one thread (or CPU) can access the protected data at a time. + pub fn new(owner: &L, data: T) -> Self { + Self { + owner: owner.locked_data().get(), + data: UnsafeCell::new(data), + } + } +} + +impl LockedBy { + /// Returns a reference to the protected data when the caller provides evidence (via a + /// [`Guard`]) that the owner is locked. + pub fn access<'a>(&'a self, guard: &'a Guard<'_, L>) -> &'a T { + if !ptr::eq(guard.deref(), self.owner) { + panic!("guard does not match owner"); + } + + // SAFETY: `guard` is evidence that the owner is locked. + unsafe { &mut *self.data.get() } + } + + /// Returns a mutable reference to the protected data when the caller provides evidence (via a + /// mutable [`Guard`]) that the owner is locked mutably. + pub fn access_mut<'a>(&'a self, guard: &'a mut Guard<'_, L>) -> &'a mut T { + if !ptr::eq(guard.deref().deref(), self.owner) { + panic!("guard does not match owner"); + } + + // SAFETY: `guard` is evidence that the owner is locked. + unsafe { &mut *self.data.get() } + } + + /// Returns a mutable reference to the protected data when the caller provides evidence (via a + /// mutable owner) that the owner is locked mutably. Showing a mutable reference to the owner + /// is sufficient because we know no other references can exist to it. + pub fn access_from_mut<'a>(&'a self, owner: &'a mut L::Inner) -> &'a mut T { + if !ptr::eq(owner, self.owner) { + panic!("mismatched owners"); + } + + // SAFETY: `owner` is evidence that there is only one reference to the owner. + unsafe { &mut *self.data.get() } + } +} diff --git a/rust/kernel/sync/mutex.rs b/rust/kernel/sync/mutex.rs new file mode 100644 index 0000000000000000000000000000000000000000..fac846b00b08129df0f9099fe0a8e25d49805937 --- /dev/null +++ b/rust/kernel/sync/mutex.rs @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! A kernel mutex. +//! +//! This module allows Rust code to use the kernel's [`struct mutex`]. + +use super::{Guard, Lock, LockFactory, LockIniter, WriteLock}; +use crate::{bindings, str::CStr, Opaque}; +use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin}; + +/// Safely initialises a [`Mutex`] with the given name, generating a new lock class. +#[macro_export] +macro_rules! mutex_init { + ($mutex:expr, $name:literal) => { + $crate::init_with_lockdep!($mutex, $name) + }; +} + +/// Exposes the kernel's [`struct mutex`]. When multiple threads attempt to lock the same mutex, +/// only one at a time is allowed to progress, the others will block (sleep) until the mutex is +/// unlocked, at which point another thread will be allowed to wake up and make progress. +/// +/// A [`Mutex`] must first be initialised with a call to [`Mutex::init_lock`] before it can be +/// used. The [`mutex_init`] macro is provided to automatically assign a new lock class to a mutex +/// instance. +/// +/// Since it may block, [`Mutex`] needs to be used with care in atomic contexts. +/// +/// [`struct mutex`]: ../../../include/linux/mutex.h +pub struct Mutex { + /// The kernel `struct mutex` object. + mutex: Opaque, + + /// A mutex needs to be pinned because it contains a [`struct list_head`] that is + /// self-referential, so it cannot be safely moved once it is initialised. + _pin: PhantomPinned, + + /// The data protected by the mutex. 
+ data: UnsafeCell, +} + +// SAFETY: `Mutex` can be transferred across thread boundaries iff the data it protects can. +#[allow(clippy::non_send_fields_in_send_ty)] +unsafe impl Send for Mutex {} + +// SAFETY: `Mutex` serialises the interior mutability it provides, so it is `Sync` as long as the +// data it protects is `Send`. +unsafe impl Sync for Mutex {} + +impl Mutex { + /// Constructs a new mutex. + /// + /// # Safety + /// + /// The caller must call [`Mutex::init_lock`] before using the mutex. + pub const unsafe fn new(t: T) -> Self { + Self { + mutex: Opaque::uninit(), + data: UnsafeCell::new(t), + _pin: PhantomPinned, + } + } +} + +impl Mutex { + /// Locks the mutex and gives the caller access to the data protected by it. Only one thread at + /// a time is allowed to access the protected data. + pub fn lock(&self) -> Guard<'_, Self> { + let ctx = self.lock_noguard(); + // SAFETY: The mutex was just acquired. + unsafe { Guard::new(self, ctx) } + } +} + +impl LockFactory for Mutex { + type LockedType = Mutex; + + unsafe fn new_lock(data: U) -> Mutex { + // SAFETY: The safety requirements of `new_lock` also require that `init_lock` be called. + unsafe { Mutex::new(data) } + } +} + +impl LockIniter for Mutex { + unsafe fn init_lock( + self: Pin<&mut Self>, + name: &'static CStr, + key: *mut bindings::lock_class_key, + ) { + unsafe { bindings::__mutex_init(self.mutex.get(), name.as_char_ptr(), key) }; + } +} + +pub struct EmptyGuardContext; + +// SAFETY: The underlying kernel `struct mutex` object ensures mutual exclusion. +unsafe impl Lock for Mutex { + type Inner = T; + type GuardContext = EmptyGuardContext; + + fn lock_noguard(&self) -> EmptyGuardContext { + // SAFETY: `mutex` points to valid memory. + unsafe { bindings::mutex_lock(self.mutex.get()) }; + EmptyGuardContext + } + + unsafe fn unlock(&self, _: &mut EmptyGuardContext) { + // SAFETY: The safety requirements of the function ensure that the mutex is owned by the + // caller. + unsafe { bindings::mutex_unlock(self.mutex.get()) }; + } + + fn locked_data(&self) -> &UnsafeCell { + &self.data + } +} + +/// A revocable mutex. +/// +/// That is, a mutex to which access can be revoked at runtime. It is a specialisation of the more +/// generic [`super::revocable::Revocable`]. +/// +/// # Examples +/// +/// ``` +/// # use kernel::sync::RevocableMutex; +/// # use kernel::revocable_init; +/// # use core::pin::Pin; +/// +/// struct Example { +/// a: u32, +/// b: u32, +/// } +/// +/// fn read_sum(v: &RevocableMutex) -> Option { +/// let guard = v.try_write()?; +/// Some(guard.a + guard.b) +/// } +/// +/// // SAFETY: We call `revocable_init` immediately below. +/// let mut v = unsafe { RevocableMutex::new(Example { a: 10, b: 20 }) }; +/// // SAFETY: We never move out of `v`. +/// let pinned = unsafe { Pin::new_unchecked(&mut v) }; +/// revocable_init!(pinned, "example::v"); +/// assert_eq!(read_sum(&v), Some(30)); +/// v.revoke(); +/// assert_eq!(read_sum(&v), None); +/// ``` +pub type RevocableMutex = super::revocable::Revocable, T>; + +/// A guard for a revocable mutex. +pub type RevocableMutexGuard<'a, T, I = WriteLock> = + super::revocable::RevocableGuard<'a, Mutex<()>, T, I>; diff --git a/rust/kernel/sync/nowait.rs b/rust/kernel/sync/nowait.rs new file mode 100644 index 0000000000000000000000000000000000000000..c9ee2f9a360743369b9e76da53ff3ccca1423f8e --- /dev/null +++ b/rust/kernel/sync/nowait.rs @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! A lock that never waits. 
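The `LockedBy` example above only shows the data layout, not an access. A sketch of the access path under the same assumptions (trimmed-down structs; the `grow` helper is an illustrative name), where the directory guard is the proof that the shared lock is held:

```
use kernel::sync::{Guard, LockedBy, Mutex};

struct InnerDirectory {
    bytes_used: u64,
}

struct InnerFile {
    bytes_used: u64,
}

struct File {
    inner: LockedBy<InnerFile, Mutex<InnerDirectory>>,
}

/// Accounts `by` extra bytes to both the file and its directory. The `Guard`
/// passed in is the compile-time evidence that the directory mutex is held;
/// `access_mut` additionally checks at run time that it guards the right owner.
fn grow(dir: &mut Guard<'_, Mutex<InnerDirectory>>, file: &File, by: u64) {
    dir.bytes_used += by;
    file.inner.access_mut(dir).bytes_used += by;
}
```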
+ +use core::cell::UnsafeCell; +use core::sync::atomic::{AtomicU8, Ordering}; + +const LOCKED: u8 = 1; +const CONTENDED: u8 = 2; + +/// A lock that only offers a [`try_lock`](NoWaitLock::try_lock) method. +/// +/// That is, on contention it doesn't offer a way for the caller to block waiting for the current +/// owner to release the lock. This is useful for best-effort kind of scenarios where waiting is +/// never needed: in such cases, users don't need a full-featured mutex or spinlock. +/// +/// When the lock is released via call to [`NoWaitLockGuard::unlock`], it indicates to the caller +/// whether there was contention (i.e., if another thread tried and failed to acquire this lock). +/// If the return value is `false`, there was definitely no contention but if it is `true`, it's +/// possible that the contention was when attempting to acquire the lock. +/// +/// # Examples +/// +/// ``` +/// use kernel::sync::NoWaitLock; +/// +/// #[derive(PartialEq)] +/// struct Example { +/// a: u32, +/// b: u32, +/// } +/// +/// let x = NoWaitLock::new(Example{ a: 10, b: 20 }); +/// +/// // Modifying the protected value. +/// { +/// let mut guard = x.try_lock().unwrap(); +/// assert_eq!(guard.a, 10); +/// assert_eq!(guard.b, 20); +/// guard.a += 20; +/// guard.b += 20; +/// assert_eq!(guard.a, 30); +/// assert_eq!(guard.b, 40); +/// } +/// +/// // Reading the protected value. +/// { +/// let guard = x.try_lock().unwrap(); +/// assert_eq!(guard.a, 30); +/// assert_eq!(guard.b, 40); +/// } +/// +/// // Second acquire fails, but succeeds after the guard is dropped. +/// { +/// let guard = x.try_lock().unwrap(); +/// assert!(x.try_lock().is_none()); +/// +/// drop(guard); +/// assert!(x.try_lock().is_some()); +/// } +/// ``` +/// +/// The following examples use the [`NoWaitLockGuard::unlock`] to release the lock and check for +/// contention. +/// +/// ``` +/// use kernel::sync::NoWaitLock; +/// +/// #[derive(PartialEq)] +/// struct Example { +/// a: u32, +/// b: u32, +/// } +/// +/// let x = NoWaitLock::new(Example{ a: 10, b: 20 }); +/// +/// // No contention when lock is released. +/// let guard = x.try_lock().unwrap(); +/// assert_eq!(guard.unlock(), false); +/// +/// // Contention detected. +/// let guard = x.try_lock().unwrap(); +/// assert!(x.try_lock().is_none()); +/// assert_eq!(guard.unlock(), true); +/// +/// // No contention again. +/// let guard = x.try_lock().unwrap(); +/// assert_eq!(guard.a, 10); +/// assert_eq!(guard.b, 20); +/// assert_eq!(guard.unlock(), false); +/// ``` +pub struct NoWaitLock { + state: AtomicU8, + data: UnsafeCell, +} + +// SAFETY: `NoWaitLock` can be transferred across thread boundaries iff the data it protects can. +unsafe impl Send for NoWaitLock {} + +// SAFETY: `NoWaitLock` only allows a single thread at a time to access the interior mutability it +// provides, so it is `Sync` as long as the data it protects is `Send`. +unsafe impl Sync for NoWaitLock {} + +impl NoWaitLock { + /// Creates a new instance of the no-wait lock. + pub fn new(data: T) -> Self { + Self { + state: AtomicU8::new(0), + data: UnsafeCell::new(data), + } + } +} + +impl NoWaitLock { + /// Tries to acquire the lock. + /// + /// If no other thread/CPU currently owns the lock, it returns a guard that can be used to + /// access the protected data. Otherwise (i.e., the lock is already owned), it returns `None`. + pub fn try_lock(&self) -> Option> { + // Fast path -- just set the LOCKED bit. 
+ // + // Acquire ordering matches the release in `NoWaitLockGuard::drop` or + // `NoWaitLockGuard::unlock`. + if self.state.fetch_or(LOCKED, Ordering::Acquire) & LOCKED == 0 { + // INVARIANTS: The thread that manages to set the `LOCKED` bit becomes the owner. + return Some(NoWaitLockGuard { lock: self }); + } + + // Set the `CONTENDED` bit. + // + // If the `LOCKED` bit has since been reset, the lock was released and the caller becomes + // the owner of the lock. It will see the `CONTENDED` bit when it releases the lock even if + // there was no additional contention but this is allowed by the interface. + if self.state.fetch_or(CONTENDED | LOCKED, Ordering::Relaxed) & LOCKED == 0 { + // INVARIANTS: The thread that manages to set the `LOCKED` bit becomes the owner. + Some(NoWaitLockGuard { lock: self }) + } else { + None + } + } +} + +/// A guard for the holder of the no-wait lock. +/// +/// # Invariants +/// +/// Only the current owner can have an instance of [`NoWaitLockGuard`]. +pub struct NoWaitLockGuard<'a, T: ?Sized> { + lock: &'a NoWaitLock, +} + +impl NoWaitLockGuard<'_, T> { + /// Unlocks the no-wait lock. + /// + /// The return value indicates whether there was contention while the lock was held, that is, + /// whether another thread tried (and failed) to acquire the lock. + pub fn unlock(self) -> bool { + // Matches the acquire in `NoWaitLock::try_lock`. + let contention = self.lock.state.swap(0, Ordering::Release) & CONTENDED != 0; + core::mem::forget(self); + contention + } +} + +impl core::ops::Deref for NoWaitLockGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + // SAFETY: The type invariant guarantees that only the owner has an instance of the guard, + // so the owner is the only one that can call this function. + unsafe { &*self.lock.data.get() } + } +} + +impl core::ops::DerefMut for NoWaitLockGuard<'_, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + // SAFETY: The type invariant guarantees that only the owner has an instance of the guard, + // so the owner is the only one that can call this function. + unsafe { &mut *self.lock.data.get() } + } +} + +impl Drop for NoWaitLockGuard<'_, T> { + fn drop(&mut self) { + // Matches the acquire in `NoWaitLock::try_lock`. + self.lock.state.store(0, Ordering::Release); + } +} diff --git a/rust/kernel/sync/revocable.rs b/rust/kernel/sync/revocable.rs new file mode 100644 index 0000000000000000000000000000000000000000..ddaa86e123f2b35a8faa2d80eb745a128240a021 --- /dev/null +++ b/rust/kernel/sync/revocable.rs @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Synchronisation primitives where access to their contents can be revoked at runtime. + +use crate::{ + bindings, + str::CStr, + sync::{Guard, Lock, LockFactory, LockInfo, NeedsLockClass, ReadLock, WriteLock}, + True, +}; +use core::{ + mem::MaybeUninit, + ops::{Deref, DerefMut}, + pin::Pin, +}; + +/// The state within the revocable synchronisation primitive. +/// +/// We don't use simply `Option` because we need to drop in-place because the contents are +/// implicitly pinned. +/// +/// # Invariants +/// +/// The `is_available` field determines if `data` is initialised. +pub struct Inner { + is_available: bool, + data: MaybeUninit, +} + +impl Inner { + fn new(data: T) -> Self { + // INVARIANT: `data` is initialised and `is_available` is `true`, so the state matches. + Self { + is_available: true, + data: MaybeUninit::new(data), + } + } + + fn drop_in_place(&mut self) { + if !self.is_available { + // Already dropped. 
+ return; + } + + // INVARIANT: `data` is being dropped and `is_available` is set to `false`, so the state + // matches. + self.is_available = false; + + // SAFETY: By the type invariants, `data` is valid because `is_available` was true. + unsafe { self.data.assume_init_drop() }; + } +} + +impl Drop for Inner { + fn drop(&mut self) { + self.drop_in_place(); + } +} + +/// Revocable synchronisation primitive. +/// +/// That is, it wraps synchronisation primitives so that access to their contents can be revoked at +/// runtime, rendering them inacessible. +/// +/// Once access is revoked and all concurrent users complete (i.e., all existing instances of +/// [`RevocableGuard`] are dropped), the wrapped object is also dropped. +/// +/// For better ergonomics, we advise the use of specialisations of this struct, for example, +/// [`super::RevocableMutex`] and [`super::RevocableRwSemaphore`]. Callers that do not need to +/// sleep while holding on to a guard should use [`crate::revocable::Revocable`] instead, which is +/// more efficient as it uses RCU to keep objects alive. +/// +/// # Examples +/// +/// ``` +/// # use kernel::sync::{Mutex, Revocable}; +/// # use kernel::revocable_init; +/// # use core::pin::Pin; +/// +/// struct Example { +/// a: u32, +/// b: u32, +/// } +/// +/// fn add_two(v: &Revocable, Example>) -> Option { +/// let mut guard = v.try_write()?; +/// guard.a += 2; +/// guard.b += 2; +/// Some(guard.a + guard.b) +/// } +/// +/// // SAFETY: We call `revocable_init` immediately below. +/// let mut v = unsafe { Revocable::, Example>::new(Example { a: 10, b: 20 }) }; +/// // SAFETY: We never move out of `v`. +/// let pinned = unsafe { Pin::new_unchecked(&mut v) }; +/// revocable_init!(pinned, "example::v"); +/// assert_eq!(add_two(&v), Some(34)); +/// v.revoke(); +/// assert_eq!(add_two(&v), None); +/// ``` +pub struct Revocable { + inner: F::LockedType>, +} + +/// Safely initialises a [`Revocable`] instance with the given name, generating a new lock class. +#[macro_export] +macro_rules! revocable_init { + ($mutex:expr, $name:literal) => { + $crate::init_with_lockdep!($mutex, $name) + }; +} + +impl Revocable { + /// Creates a new revocable instance of the given lock. + /// + /// # Safety + /// + /// The caller must call [`Revocable::init`] before using the revocable synch primitive. + pub unsafe fn new(data: T) -> Self { + Self { + // SAFETY: The safety requirements of this function require that `Revocable::init` + // be called before the returned object can be used. Lock initialisation is called + // from `Revocable::init`. + inner: unsafe { F::new_lock(Inner::new(data)) }, + } + } +} + +impl NeedsLockClass for Revocable +where + F::LockedType>: NeedsLockClass, +{ + unsafe fn init( + self: Pin<&mut Self>, + name: &'static CStr, + key1: *mut bindings::lock_class_key, + key2: *mut bindings::lock_class_key, + ) { + // SAFETY: `inner` is pinned when `self` is. + let inner = unsafe { self.map_unchecked_mut(|r| &mut r.inner) }; + + // SAFETY: The safety requirements of this function satisfy the ones for `inner.init` + // (they're the same). + unsafe { inner.init(name, key1, key2) }; + } +} + +impl Revocable +where + F::LockedType>: Lock>, +{ + /// Revokes access to and drops the wrapped object. + /// + /// Revocation and dropping happen after ongoing accessors complete. + pub fn revoke(&self) { + self.lock().drop_in_place(); + } + + /// Tries to lock the \[revocable\] wrapped object in write (exclusive) mode. 
+ /// + /// Returns `None` if the object has been revoked and is therefore no longer accessible. + /// + /// Returns a guard that gives access to the object otherwise; the object is guaranteed to + /// remain accessible while the guard is alive. Callers are allowed to sleep while holding on + /// to the returned guard. + pub fn try_write(&self) -> Option> { + let inner = self.lock(); + if !inner.is_available { + return None; + } + Some(RevocableGuard::new(inner)) + } + + fn lock(&self) -> Guard<'_, F::LockedType>> { + let ctx = self.inner.lock_noguard(); + // SAFETY: The lock was acquired in the call above. + unsafe { Guard::new(&self.inner, ctx) } + } +} + +impl Revocable +where + F::LockedType>: Lock>, +{ + /// Tries to lock the \[revocable\] wrapped object in read (shared) mode. + /// + /// Returns `None` if the object has been revoked and is therefore no longer accessible. + /// + /// Returns a guard that gives access to the object otherwise; the object is guaranteed to + /// remain accessible while the guard is alive. Callers are allowed to sleep while holding on + /// to the returned guard. + pub fn try_read(&self) -> Option> { + let ctx = self.inner.lock_noguard(); + // SAFETY: The lock was acquired in the call above. + let inner = unsafe { Guard::new(&self.inner, ctx) }; + if !inner.is_available { + return None; + } + Some(RevocableGuard::new(inner)) + } +} + +/// A guard that allows access to a revocable object and keeps it alive. +pub struct RevocableGuard<'a, F: LockFactory, T, I: LockInfo> +where + F::LockedType>: Lock>, +{ + guard: Guard<'a, F::LockedType>, I>, +} + +impl<'a, F: LockFactory, T, I: LockInfo> RevocableGuard<'a, F, T, I> +where + F::LockedType>: Lock>, +{ + fn new(guard: Guard<'a, F::LockedType>, I>) -> Self { + Self { guard } + } +} + +impl> RevocableGuard<'_, F, T, I> +where + F::LockedType>: Lock>, +{ + /// Returns a pinned mutable reference to the wrapped object. + pub fn as_pinned_mut(&mut self) -> Pin<&mut T> { + // SAFETY: Revocable mutexes must be pinned, so we choose to always project the data as + // pinned as well (i.e., we guarantee we never move it). + unsafe { Pin::new_unchecked(&mut *self) } + } +} + +impl Deref for RevocableGuard<'_, F, T, I> +where + F::LockedType>: Lock>, +{ + type Target = T; + + fn deref(&self) -> &Self::Target { + unsafe { &*self.guard.data.as_ptr() } + } +} + +impl> DerefMut for RevocableGuard<'_, F, T, I> +where + F::LockedType>: Lock>, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { &mut *self.guard.data.as_mut_ptr() } + } +} diff --git a/rust/kernel/sync/rwsem.rs b/rust/kernel/sync/rwsem.rs new file mode 100644 index 0000000000000000000000000000000000000000..eb220e4972cfa5b3f4e937b1f8bdf3e0c73fd406 --- /dev/null +++ b/rust/kernel/sync/rwsem.rs @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! A kernel read/write mutex. +//! +//! This module allows Rust code to use the kernel's [`struct rw_semaphore`]. +//! +//! C header: [`include/linux/rwsem.h`](../../../../include/linux/rwsem.h) + +use super::{mutex::EmptyGuardContext, Guard, Lock, LockFactory, LockIniter, ReadLock, WriteLock}; +use crate::{bindings, str::CStr, Opaque}; +use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin}; + +/// Safely initialises a [`RwSemaphore`] with the given name, generating a new lock class. +#[macro_export] +macro_rules! rwsemaphore_init { + ($rwsem:expr, $name:literal) => { + $crate::init_with_lockdep!($rwsem, $name) + }; +} + +/// Exposes the kernel's [`struct rw_semaphore`]. 
+/// +/// It's a read/write mutex. That is, it allows multiple readers to acquire it concurrently, but +/// only one writer at a time. On contention, waiters sleep. +/// +/// A [`RwSemaphore`] must first be initialised with a call to [`RwSemaphore::init_lock`] before it +/// can be used. The [`rwsemaphore_init`] macro is provided to automatically assign a new lock +/// class to an [`RwSemaphore`] instance. +/// +/// Since it may block, [`RwSemaphore`] needs to be used with care in atomic contexts. +/// +/// [`struct rw_semaphore`]: ../../../include/linux/rwsem.h +pub struct RwSemaphore { + /// The kernel `struct rw_semaphore` object. + rwsem: Opaque, + + /// An rwsem needs to be pinned because it contains a [`struct list_head`] that is + /// self-referential, so it cannot be safely moved once it is initialised. + _pin: PhantomPinned, + + /// The data protected by the rwsem. + data: UnsafeCell, +} + +// SAFETY: `RwSemaphore` can be transferred across thread boundaries iff the data it protects can. +#[allow(clippy::non_send_fields_in_send_ty)] +unsafe impl Send for RwSemaphore {} + +// SAFETY: `RwSemaphore` requires that the protected type be `Sync` for it to be `Sync` as well +// because the read mode allows multiple threads to access the protected data concurrently. It +// requires `Send` because the write lock allows a `&mut T` to be accessible from an arbitrary +// thread. +unsafe impl Sync for RwSemaphore {} + +impl RwSemaphore { + /// Constructs a new rw semaphore. + /// + /// # Safety + /// + /// The caller must call [`RwSemaphore::init_lock`] before using the rw semaphore. + pub unsafe fn new(t: T) -> Self { + Self { + rwsem: Opaque::uninit(), + data: UnsafeCell::new(t), + _pin: PhantomPinned, + } + } +} + +impl RwSemaphore { + /// Locks the rw semaphore in write (exclusive) mode and gives the caller access to the data + /// protected by it. Only one thread at a time is allowed to access the protected data. + pub fn write(&self) -> Guard<'_, Self> { + let ctx = ::lock_noguard(self); + // SAFETY: The rw semaphore was just acquired in write mode. + unsafe { Guard::new(self, ctx) } + } + + /// Locks the rw semaphore in read (shared) mode and gives the caller access to the data + /// protected by it. Only one thread at a time is allowed to access the protected data. + pub fn read(&self) -> Guard<'_, Self, ReadLock> { + let ctx = >::lock_noguard(self); + // SAFETY: The rw semaphore was just acquired in read mode. + unsafe { Guard::new(self, ctx) } + } +} + +impl LockFactory for RwSemaphore { + type LockedType = RwSemaphore; + + unsafe fn new_lock(data: U) -> RwSemaphore { + // SAFETY: The safety requirements of `new_lock` also require that `init_lock` be called. + unsafe { RwSemaphore::new(data) } + } +} + +impl LockIniter for RwSemaphore { + unsafe fn init_lock( + self: Pin<&mut Self>, + name: &'static CStr, + key: *mut bindings::lock_class_key, + ) { + unsafe { bindings::__init_rwsem(self.rwsem.get(), name.as_char_ptr(), key) }; + } +} + +// SAFETY: The underlying kernel `struct rw_semaphore` object ensures mutual exclusion because it's +// acquired in write mode. +unsafe impl Lock for RwSemaphore { + type Inner = T; + type GuardContext = EmptyGuardContext; + + fn lock_noguard(&self) -> EmptyGuardContext { + // SAFETY: `rwsem` points to valid memory. 
+ unsafe { bindings::down_write(self.rwsem.get()) }; + EmptyGuardContext + } + + unsafe fn unlock(&self, _: &mut EmptyGuardContext) { + // SAFETY: The safety requirements of the function ensure that the rw semaphore is owned by + // the caller. + unsafe { bindings::up_write(self.rwsem.get()) }; + } + + fn locked_data(&self) -> &UnsafeCell { + &self.data + } +} + +// SAFETY: The underlying kernel `struct rw_semaphore` object ensures that only shared references +// are accessible from other threads because it's acquired in read mode. +unsafe impl Lock for RwSemaphore { + type Inner = T; + type GuardContext = EmptyGuardContext; + + fn lock_noguard(&self) -> EmptyGuardContext { + // SAFETY: `rwsem` points to valid memory. + unsafe { bindings::down_read(self.rwsem.get()) }; + EmptyGuardContext + } + + unsafe fn unlock(&self, _: &mut EmptyGuardContext) { + // SAFETY: The safety requirements of the function ensure that the rw semaphore is owned by + // the caller. + unsafe { bindings::up_read(self.rwsem.get()) }; + } + + fn locked_data(&self) -> &UnsafeCell { + &self.data + } +} + +/// A revocable rw semaphore. +/// +/// That is, a read/write semaphore to which access can be revoked at runtime. It is a +/// specialisation of the more generic [`super::revocable::Revocable`]. +/// +/// # Examples +/// +/// ``` +/// # use kernel::sync::RevocableRwSemaphore; +/// # use kernel::revocable_init; +/// # use core::pin::Pin; +/// +/// struct Example { +/// a: u32, +/// b: u32, +/// } +/// +/// fn read_sum(v: &RevocableRwSemaphore) -> Option { +/// let guard = v.try_read()?; +/// Some(guard.a + guard.b) +/// } +/// +/// fn add_two(v: &RevocableRwSemaphore) -> Option { +/// let mut guard = v.try_write()?; +/// guard.a += 2; +/// guard.b += 2; +/// Some(guard.a + guard.b) +/// } +/// +/// // SAFETY: We call `revocable_init` immediately below. +/// let mut v = unsafe { RevocableRwSemaphore::new(Example { a: 10, b: 20 }) }; +/// // SAFETY: We never move out of `v`. +/// let pinned = unsafe { Pin::new_unchecked(&mut v) }; +/// revocable_init!(pinned, "example::v"); +/// assert_eq!(read_sum(&v), Some(30)); +/// assert_eq!(add_two(&v), Some(34)); +/// v.revoke(); +/// assert_eq!(read_sum(&v), None); +/// assert_eq!(add_two(&v), None); +/// ``` +pub type RevocableRwSemaphore = super::revocable::Revocable, T>; + +/// A guard for a revocable rw semaphore.. +pub type RevocableRwSemaphoreGuard<'a, T, I = WriteLock> = + super::revocable::RevocableGuard<'a, RwSemaphore<()>, T, I>; diff --git a/rust/kernel/sync/seqlock.rs b/rust/kernel/sync/seqlock.rs new file mode 100644 index 0000000000000000000000000000000000000000..f42d649823c66773c360348724d9d2bd85228511 --- /dev/null +++ b/rust/kernel/sync/seqlock.rs @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! A kernel sequential lock (seqlock). +//! +//! This module allows Rust code to use the sequential locks based on the kernel's `seqcount_t` and +//! any locks implementing the [`LockFactory`] trait. +//! +//! See . + +use super::{Guard, Lock, LockFactory, LockIniter, NeedsLockClass, ReadLock}; +use crate::{bindings, str::CStr, Opaque}; +use core::{cell::UnsafeCell, marker::PhantomPinned, ops::Deref, pin::Pin}; + +/// Exposes sequential locks backed by the kernel's `seqcount_t`. +/// +/// The write-side critical section is protected by a lock implementing the [`LockFactory`] trait. 
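+///
+/// Readers do not acquire the write-side lock; they run concurrently with writers and use the
+/// sequence counter to detect whether a writer ran in the meantime, retrying the read if needed
+/// (see [`SeqLock::read`], [`SeqLockReadGuard::need_retry`] and [`SeqLock::access`]).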
+/// +/// # Examples +/// +///``` +/// use kernel::sync::{SeqLock, SpinLock}; +/// use core::sync::atomic::{AtomicU32, Ordering}; +/// +/// struct Example { +/// a: AtomicU32, +/// b: AtomicU32, +/// } +/// +/// fn get_sum(v: &SeqLock>) -> u32 { +/// // Use `access` to access the fields of `Example`. +/// v.access(|e| e.a.load(Ordering::Relaxed) + e.b.load(Ordering::Relaxed)) +/// } +/// +/// fn get_sum_with_guard(v: &SeqLock>) -> u32 { +/// // Use `read` and `need_retry` in a loop to access the fields of `Example`. +/// loop { +/// let guard = v.read(); +/// let sum = guard.a.load(Ordering::Relaxed) + guard.b.load(Ordering::Relaxed); +/// if !guard.need_retry() { +/// break sum; +/// } +/// } +/// } +/// +/// fn inc_each(v: &SeqLock>) { +/// // Use a write-side guard to access the fields of `Example`. +/// let guard = v.write(); +/// let a = guard.a.load(Ordering::Relaxed); +/// guard.a.store(a + 1, Ordering::Relaxed); +/// let b = guard.b.load(Ordering::Relaxed); +/// guard.b.store(b + 1, Ordering::Relaxed); +/// } +/// ``` +pub struct SeqLock { + _p: PhantomPinned, + count: Opaque, + write_lock: L, +} + +// SAFETY: `SeqLock` can be transferred across thread boundaries iff the data it protects and the +// underlying lock can. +#[allow(clippy::non_send_fields_in_send_ty)] +unsafe impl Send for SeqLock where L::Inner: Send {} + +// SAFETY: `SeqLock` allows concurrent access to the data it protects by both readers and writers, +// so it requires that the data it protects be `Sync`, as well as the underlying lock. +unsafe impl Sync for SeqLock where L::Inner: Sync {} + +impl SeqLock { + /// Constructs a new instance of [`SeqLock`]. + /// + /// # Safety + /// + /// The caller must call [`SeqLock::init`] before using the seqlock. + pub unsafe fn new(data: L::Inner) -> Self + where + L: LockFactory = L>, + L::Inner: Sized, + { + Self { + _p: PhantomPinned, + count: Opaque::uninit(), + // SAFETY: `L::init_lock` is called from `SeqLock::init`, which is required to be + // called by the function's safety requirements. + write_lock: unsafe { L::new_lock(data) }, + } + } +} + +impl SeqLock { + /// Accesses the protected data in read mode. + /// + /// Readers and writers are allowed to run concurrently, so callers must check if they need to + /// refetch the values before they are used (e.g., because a writer changed them concurrently, + /// rendering them potentially inconsistent). The check is performed via calls to + /// [`SeqLockReadGuard::need_retry`]. + pub fn read(&self) -> SeqLockReadGuard<'_, L> { + SeqLockReadGuard { + lock: self, + // SAFETY: `count` contains valid memory. + start_count: unsafe { bindings::read_seqcount_begin(self.count.get()) }, + } + } + + /// Accesses the protected data in read mode. + /// + /// The provided closure is called repeatedly if it may have accessed inconsistent data (e.g., + /// because a concurrent writer modified it). This is a wrapper around [`SeqLock::read`] and + /// [`SeqLockReadGuard::need_retry`] in a loop. + pub fn access R, R>(&self, cb: F) -> R { + loop { + let guard = self.read(); + let ret = cb(&guard); + if !guard.need_retry() { + return ret; + } + } + } + + /// Locks the underlying lock and returns a guard that allows access to the protected data. + /// + /// The guard is not mutable though because readers are still allowed to concurrently access + /// the data. The protected data structure needs to provide interior mutability itself (e.g., + /// via atomic types) for the individual fields that can be mutated. 
+ pub fn write(&self) -> Guard<'_, Self, ReadLock> { + let ctx = self.lock_noguard(); + // SAFETY: The seqlock was just acquired. + unsafe { Guard::new(self, ctx) } + } +} + +impl NeedsLockClass for SeqLock { + unsafe fn init( + mut self: Pin<&mut Self>, + name: &'static CStr, + key1: *mut bindings::lock_class_key, + key2: *mut bindings::lock_class_key, + ) { + // SAFETY: `write_lock` is pinned when `self` is. + let pinned = unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.write_lock) }; + // SAFETY: `key1` is valid by the safety requirements of this function. + unsafe { pinned.init_lock(name, key1) }; + // SAFETY: `key2` is valid by the safety requirements of this function. + unsafe { bindings::__seqcount_init(self.count.get(), name.as_char_ptr(), key2) }; + } +} + +// SAFETY: The underlying lock ensures mutual exclusion. +unsafe impl Lock for SeqLock { + type Inner = L::Inner; + type GuardContext = L::GuardContext; + + fn lock_noguard(&self) -> L::GuardContext { + let ctx = self.write_lock.lock_noguard(); + // SAFETY: `count` contains valid memory. + unsafe { bindings::write_seqcount_begin(self.count.get()) }; + ctx + } + + fn relock(&self, ctx: &mut L::GuardContext) { + self.write_lock.relock(ctx); + // SAFETY: `count` contains valid memory. + unsafe { bindings::write_seqcount_begin(self.count.get()) }; + } + + unsafe fn unlock(&self, ctx: &mut L::GuardContext) { + // SAFETY: The safety requirements of the function ensure that lock is owned by the caller. + unsafe { bindings::write_seqcount_end(self.count.get()) }; + // SAFETY: The safety requirements of the function ensure that lock is owned by the caller. + unsafe { self.write_lock.unlock(ctx) }; + } + + fn locked_data(&self) -> &UnsafeCell { + self.write_lock.locked_data() + } +} + +/// Allows read-side access to data protected by a sequential lock. +pub struct SeqLockReadGuard<'a, L: Lock + ?Sized> { + lock: &'a SeqLock, + start_count: u32, +} + +impl SeqLockReadGuard<'_, L> { + /// Determine if the callers needs to retry reading values. + /// + /// It returns `true` when a concurrent writer ran between the guard being created and + /// [`Self::need_retry`] being called. + pub fn need_retry(&self) -> bool { + // SAFETY: `count` is valid because the guard guarantees that the lock remains alive. + unsafe { bindings::read_seqcount_retry(self.lock.count.get(), self.start_count) != 0 } + } +} + +impl Deref for SeqLockReadGuard<'_, L> { + type Target = L::Inner; + + fn deref(&self) -> &Self::Target { + // SAFETY: We only ever allow shared access to the protected data. + unsafe { &*self.lock.locked_data().get() } + } +} diff --git a/rust/kernel/sync/smutex.rs b/rust/kernel/sync/smutex.rs new file mode 100644 index 0000000000000000000000000000000000000000..4f6797361ab320269285627d5b8a7160011f028e --- /dev/null +++ b/rust/kernel/sync/smutex.rs @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! A simple mutex implementation. +//! +//! Differently from [`super::Mutex`], this implementation does not require pinning, so the +//! ergonomics are much improved, though the implementation is not as feature-rich as the C-based +//! one. The main advantage is that it doesn't impose unsafe blocks on callers. +//! +//! The mutex is made up of 2 words in addition to the data it protects. The first one is accessed +//! concurrently by threads trying to acquire and release the mutex, it contains a "stack" of +//! waiters and a "locked" bit; the second one is only accessible by the thread holding the mutex, +//! it contains a queue of waiters. 
+//! Waiters are moved from the stack to the queue when the mutex is next unlocked while the stack
+//! is non-empty and the queue is empty. A single waiter is popped from the wait queue when the
+//! owner of the mutex unlocks it.
+//!
+//! The initial state of the mutex is `<locked=false, stack=[], queue=[]>`, meaning that it isn't
+//! locked and both the waiter stack and queue are empty.
+//!
+//! A lock operation transitions the mutex to state `<locked=true, stack=[], queue=[]>`.
+//!
+//! An unlock operation transitions the mutex back to the initial state. However, an attempt to
+//! lock the mutex while it's already locked results in a waiter being created (on the locking
+//! thread's stack) and pushed onto the waiter stack, so the state is
+//! `<locked=true, stack=[W1], queue=[]>`.
+//!
+//! Another thread trying to lock the mutex results in another waiter being pushed onto the stack,
+//! so the state becomes `<locked=true, stack=[W2, W1], queue=[]>`.
+//!
+//! In such states (queue is empty but stack is non-empty), the unlock operation is performed in
+//! three steps:
+//! 1. The stack is popped (but the mutex remains locked), so the state is:
+//!    `<locked=true, stack=[], queue=[]>`
+//! 2. The stack is turned into a queue by reversing it, so the state is:
+//!    `<locked=true, stack=[], queue=[W1, W2]>`
+//! 3. Finally, the lock is released, and the first waiter (W1) is awakened, so the state is:
+//!    `<locked=false, stack=[], queue=[W2]>`
+//!
+//! The mutex remains accessible to any threads attempting to lock it in any of the intermediate
+//! states above. For example, while it is locked, other threads may add waiters to the stack
+//! (which is ok because we want to release the ones on the queue first); another example is that
+//! another thread may acquire the mutex before waiter W1 in the example above. This makes the
+//! mutex unfair, but that is desirable because the other thread is running already and may in
+//! fact release the lock before W1 manages to get scheduled -- it also mitigates the lock convoy
+//! problem when the releasing thread wants to immediately acquire the lock again: it will be
+//! allowed to do so (as long as W1 doesn't get to it first).
+//!
+//! When the waiter queue is non-empty, unlocking the mutex always results in the first waiter
+//! being popped from the queue and awakened.
+
+use super::{mutex::EmptyGuardContext, Guard, Lock, LockFactory, LockIniter};
+use crate::{bindings, str::CStr, Opaque};
+use core::sync::atomic::{AtomicUsize, Ordering};
+use core::{cell::UnsafeCell, pin::Pin};
+
+/// The value that is OR'd into the [`Mutex::waiter_stack`] when the mutex is locked.
+const LOCKED: usize = 1;
+
+/// A simple mutex.
+///
+/// This is a mutual-exclusion primitive. It guarantees that only one thread at a time may access
+/// the data it protects. When multiple threads attempt to lock the same mutex, only one at a time
+/// is allowed to progress; the others will block (sleep) until the mutex is unlocked, at which
+/// point another thread will be allowed to wake up and make progress.
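+///
+/// Unlike [`super::Mutex`], this mutex does not need to be pinned or explicitly initialised:
+/// [`Mutex::new`] is a `const` function, so instances can be created in `const` contexts and used
+/// directly in statics, as shown in the example below.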
+/// +/// # Examples +/// +/// ``` +/// # use kernel::{Result, sync::Ref, sync::smutex::Mutex}; +/// +/// struct Example { +/// a: u32, +/// b: u32, +/// } +/// +/// static EXAMPLE: Mutex = Mutex::new(Example{ a: 10, b: 20 }); +/// +/// fn inc_a(example: &Mutex) { +/// let mut guard = example.lock(); +/// guard.a += 1; +/// } +/// +/// fn sum(example: &Mutex) -> u32 { +/// let guard = example.lock(); +/// guard.a + guard.b +/// } +/// +/// fn try_new(a: u32, b: u32) -> Result>> { +/// Ref::try_new(Mutex::new(Example {a, b})) +/// } +/// +/// assert_eq!(EXAMPLE.lock().a, 10); +/// assert_eq!(sum(&EXAMPLE), 30); +/// +/// inc_a(&EXAMPLE); +/// +/// assert_eq!(EXAMPLE.lock().a, 11); +/// assert_eq!(sum(&EXAMPLE), 31); +/// +/// # try_new(42, 43); +/// ``` +pub struct Mutex { + /// A stack of waiters. + /// + /// It is accessed atomically by threads lock/unlocking the mutex. Additionally, the + /// least-significant bit is used to indicate whether the mutex is locked or not. + waiter_stack: AtomicUsize, + + /// A queue of waiters. + /// + /// This is only accessible to the holder of the mutex. When the owner of the mutex is + /// unlocking it, it will move waiters from the stack to the queue when the queue is empty and + /// the stack non-empty. + waiter_queue: UnsafeCell<*mut Waiter>, + + /// The data protected by the mutex. + data: UnsafeCell, +} + +// SAFETY: `Mutex` can be transferred across thread boundaries iff the data it protects can. +#[allow(clippy::non_send_fields_in_send_ty)] +unsafe impl Send for Mutex {} + +// SAFETY: `Mutex` serialises the interior mutability it provides, so it is `Sync` as long as the +// data it protects is `Send`. +unsafe impl Sync for Mutex {} + +impl Mutex { + /// Creates a new instance of the mutex. + pub const fn new(data: T) -> Self { + Self { + waiter_stack: AtomicUsize::new(0), + waiter_queue: UnsafeCell::new(core::ptr::null_mut()), + data: UnsafeCell::new(data), + } + } +} + +impl Mutex { + /// Locks the mutex and gives the caller access to the data protected by it. Only one thread at + /// a time is allowed to access the protected data. + pub fn lock(&self) -> Guard<'_, Self> { + let ctx = self.lock_noguard(); + // SAFETY: The mutex was just acquired. + unsafe { Guard::new(self, ctx) } + } +} + +impl LockFactory for Mutex { + type LockedType = Mutex; + + unsafe fn new_lock(data: U) -> Mutex { + Mutex::new(data) + } +} + +impl LockIniter for Mutex { + unsafe fn init_lock( + self: Pin<&mut Self>, + _name: &'static CStr, + _key: *mut bindings::lock_class_key, + ) { + } +} + +// SAFETY: The mutex implementation ensures mutual exclusion. +unsafe impl Lock for Mutex { + type Inner = T; + type GuardContext = EmptyGuardContext; + + fn lock_noguard(&self) -> EmptyGuardContext { + loop { + // Try the fast path: the caller owns the mutex if we manage to set the `LOCKED` bit. + // + // The `acquire` order matches with one of the `release` ones in `unlock`. + if self.waiter_stack.fetch_or(LOCKED, Ordering::Acquire) & LOCKED == 0 { + return EmptyGuardContext; + } + + // Slow path: we'll likely need to wait, so initialise a local waiter struct. + let mut waiter = Waiter { + completion: Opaque::uninit(), + next: core::ptr::null_mut(), + }; + + // SAFETY: The completion object was just allocated on the stack and is valid for + // writes. + unsafe { bindings::init_completion(waiter.completion.get()) }; + + // Try to enqueue the waiter by pushing into onto the waiter stack. We want to do it + // only while the mutex is locked by another thread. 
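+ //
+ // `waiter_stack` encodes both pieces of state: the least-significant bit is the
+ // `LOCKED` flag, and the remaining bits are a pointer to the waiter currently at the
+ // top of the stack (null when the stack is empty). The loop below retries until it
+ // either acquires the mutex (when it observes the `LOCKED` bit clear) or successfully
+ // publishes `waiter` at the top of the stack via compare-exchange.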
+ loop { + // We use relaxed here because we're just reading the value we'll CAS later (which + // has a stronger ordering on success). + let mut v = self.waiter_stack.load(Ordering::Relaxed); + if v & LOCKED == 0 { + // The mutex was released by another thread, so try to acquire it. + // + // The `acquire` order matches with one of the `release` ones in `unlock`. + v = self.waiter_stack.fetch_or(LOCKED, Ordering::Acquire); + if v & LOCKED == 0 { + return EmptyGuardContext; + } + } + + waiter.next = (v & !LOCKED) as _; + + // The `release` order matches with `acquire` in `unlock` when the stack is swapped + // out. We use release order here to ensure that the other thread can see our + // waiter fully initialised. + if self + .waiter_stack + .compare_exchange( + v, + (&mut waiter as *mut _ as usize) | LOCKED, + Ordering::Release, + Ordering::Relaxed, + ) + .is_ok() + { + break; + } + } + + // Wait for the owner to lock to wake this thread up. + // + // SAFETY: Completion object was previously initialised with `init_completion` and + // remains valid. + unsafe { bindings::wait_for_completion(waiter.completion.get()) }; + } + } + + unsafe fn unlock(&self, _: &mut EmptyGuardContext) { + // SAFETY: The caller owns the mutex, so it is safe to manipulate the local wait queue. + let mut waiter = unsafe { *self.waiter_queue.get() }; + loop { + // If we have a non-empty local queue of waiters, pop the first one, release the mutex, + // and wake it up (the popped waiter). + if !waiter.is_null() { + // SAFETY: The caller owns the mutex, so it is safe to manipulate the local wait + // queue. + unsafe { *self.waiter_queue.get() = (*waiter).next }; + + // The `release` order matches with one of the `acquire` ones in `lock_noguard`. + self.waiter_stack.fetch_and(!LOCKED, Ordering::Release); + + // Wake up the first waiter. + // + // SAFETY: The completion object was initialised before being added to the wait + // stack and is only removed above, when called completed. So it is safe for + // writes. + unsafe { bindings::complete_all((*waiter).completion.get()) }; + return; + } + + // Try the fast path when there are no local waiters. + // + // The `release` order matches with one of the `acquire` ones in `lock_noguard`. + if self + .waiter_stack + .compare_exchange(LOCKED, 0, Ordering::Release, Ordering::Relaxed) + .is_ok() + { + return; + } + + // We don't have a local queue, so pull the whole stack off, reverse it, and use it as a + // local queue. Since we're manipulating this queue, we need to keep ownership of the + // mutex. + // + // The `acquire` order matches with the `release` one in `lock_noguard` where a waiter + // is pushed onto the stack. It ensures that we see the fully-initialised waiter. + let mut stack = + (self.waiter_stack.swap(LOCKED, Ordering::Acquire) & !LOCKED) as *mut Waiter; + while !stack.is_null() { + // SAFETY: The caller still owns the mutex, so it is safe to manipulate the + // elements of the wait queue, which will soon become that wait queue. + let next = unsafe { (*stack).next }; + + // SAFETY: Same as above. 
+ unsafe { (*stack).next = waiter }; + + waiter = stack; + stack = next; + } + } + } + + fn locked_data(&self) -> &UnsafeCell { + &self.data + } +} + +struct Waiter { + completion: Opaque, + next: *mut Waiter, +} diff --git a/rust/kernel/sync/spinlock.rs b/rust/kernel/sync/spinlock.rs new file mode 100644 index 0000000000000000000000000000000000000000..fb324d63127f1e8ad1b6a9eb33286640a10d5344 --- /dev/null +++ b/rust/kernel/sync/spinlock.rs @@ -0,0 +1,360 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! A kernel spinlock. +//! +//! This module allows Rust code to use the kernel's [`struct spinlock`]. +//! +//! See . + +use super::{mutex::EmptyGuardContext, Guard, Lock, LockFactory, LockInfo, LockIniter, WriteLock}; +use crate::{bindings, c_types, str::CStr, Opaque, True}; +use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin}; + +/// Safely initialises a [`SpinLock`] with the given name, generating a new lock class. +#[macro_export] +macro_rules! spinlock_init { + ($spinlock:expr, $name:literal) => { + $crate::init_with_lockdep!($spinlock, $name) + }; +} + +/// Exposes the kernel's [`spinlock_t`]. When multiple CPUs attempt to lock the same spinlock, only +/// one at a time is allowed to progress, the others will block (spinning) until the spinlock is +/// unlocked, at which point another CPU will be allowed to make progress. +/// +/// A [`SpinLock`] must first be initialised with a call to [`SpinLock::init_lock`] before it can be +/// used. The [`spinlock_init`] macro is provided to automatically assign a new lock class to a +/// spinlock instance. +/// +/// There are two ways to acquire the lock: +/// - [`SpinLock::lock`], which doesn't manage interrupt state, so it should be used in only two +/// cases: (a) when the caller knows that interrupts are disabled, or (b) when callers never use +/// it in atomic context (e.g., interrupt handlers), in which case it is ok for interrupts to be +/// enabled. +/// - [`SpinLock::lock_irqdisable`], which disables interrupts if they are enabled before +/// acquiring the lock. When the lock is released, the interrupt state is automatically returned +/// to its value before [`SpinLock::lock_irqdisable`] was called. +/// +/// # Examples +/// +/// ``` +/// # use kernel::sync::SpinLock; +/// # use core::pin::Pin; +/// +/// struct Example { +/// a: u32, +/// b: u32, +/// } +/// +/// // Function that acquires spinlock without changing interrupt state. +/// fn lock_example(value: &SpinLock) { +/// let mut guard = value.lock(); +/// guard.a = 10; +/// guard.b = 20; +/// } +/// +/// // Function that acquires spinlock and disables interrupts while holding it. +/// fn lock_irqdisable_example(value: &SpinLock) { +/// let mut guard = value.lock_irqdisable(); +/// guard.a = 30; +/// guard.b = 40; +/// } +/// +/// // Initialises a spinlock. +/// // SAFETY: `spinlock_init` is called below. +/// let mut value = unsafe { SpinLock::new(Example { a: 1, b: 2 }) }; +/// // SAFETY: We don't move `value`. +/// kernel::spinlock_init!(unsafe { Pin::new_unchecked(&mut value) }, "value"); +/// +/// // Calls the example functions. +/// assert_eq!(value.lock().a, 1); +/// lock_example(&value); +/// assert_eq!(value.lock().a, 10); +/// lock_irqdisable_example(&value); +/// assert_eq!(value.lock().a, 30); +/// ``` +/// +/// [`spinlock_t`]: ../../../include/linux/spinlock.h +pub struct SpinLock { + spin_lock: Opaque, + + /// Spinlocks are architecture-defined. 
So we conservatively require them to be pinned in case + /// some architecture uses self-references now or in the future. + _pin: PhantomPinned, + + data: UnsafeCell, +} + +// SAFETY: `SpinLock` can be transferred across thread boundaries iff the data it protects can. +unsafe impl Send for SpinLock {} + +// SAFETY: `SpinLock` serialises the interior mutability it provides, so it is `Sync` as long as the +// data it protects is `Send`. +unsafe impl Sync for SpinLock {} + +impl SpinLock { + /// Constructs a new spinlock. + /// + /// # Safety + /// + /// The caller must call [`SpinLock::init_lock`] before using the spinlock. + pub const unsafe fn new(t: T) -> Self { + Self { + spin_lock: Opaque::uninit(), + data: UnsafeCell::new(t), + _pin: PhantomPinned, + } + } +} + +impl SpinLock { + /// Locks the spinlock and gives the caller access to the data protected by it. Only one thread + /// at a time is allowed to access the protected data. + pub fn lock(&self) -> Guard<'_, Self, WriteLock> { + let ctx = >::lock_noguard(self); + // SAFETY: The spinlock was just acquired. + unsafe { Guard::new(self, ctx) } + } + + /// Locks the spinlock and gives the caller access to the data protected by it. Additionally it + /// disables interrupts (if they are enabled). + /// + /// When the lock in unlocked, the interrupt state (enabled/disabled) is restored. + pub fn lock_irqdisable(&self) -> Guard<'_, Self, DisabledInterrupts> { + let ctx = >::lock_noguard(self); + // SAFETY: The spinlock was just acquired. + unsafe { Guard::new(self, ctx) } + } +} + +impl LockFactory for SpinLock { + type LockedType = SpinLock; + + unsafe fn new_lock(data: U) -> SpinLock { + // SAFETY: The safety requirements of `new_lock` also require that `init_lock` be called. + unsafe { SpinLock::new(data) } + } +} + +impl LockIniter for SpinLock { + unsafe fn init_lock( + self: Pin<&mut Self>, + name: &'static CStr, + key: *mut bindings::lock_class_key, + ) { + unsafe { bindings::__spin_lock_init(self.spin_lock.get(), name.as_char_ptr(), key) }; + } +} + +/// A type state indicating that interrupts were disabled. +pub struct DisabledInterrupts; +impl LockInfo for DisabledInterrupts { + type Writable = True; +} + +// SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion. +unsafe impl Lock for SpinLock { + type Inner = T; + type GuardContext = EmptyGuardContext; + + fn lock_noguard(&self) -> EmptyGuardContext { + // SAFETY: `spin_lock` points to valid memory. + unsafe { bindings::spin_lock(self.spin_lock.get()) }; + EmptyGuardContext + } + + unsafe fn unlock(&self, _: &mut EmptyGuardContext) { + // SAFETY: The safety requirements of the function ensure that the spinlock is owned by + // the caller. + unsafe { bindings::spin_unlock(self.spin_lock.get()) } + } + + fn locked_data(&self) -> &UnsafeCell { + &self.data + } +} + +// SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion. +unsafe impl Lock for SpinLock { + type Inner = T; + type GuardContext = c_types::c_ulong; + + fn lock_noguard(&self) -> c_types::c_ulong { + // SAFETY: `spin_lock` points to valid memory. + unsafe { bindings::spin_lock_irqsave(self.spin_lock.get()) } + } + + unsafe fn unlock(&self, ctx: &mut c_types::c_ulong) { + // SAFETY: The safety requirements of the function ensure that the spinlock is owned by + // the caller. 
+ unsafe { bindings::spin_unlock_irqrestore(self.spin_lock.get(), *ctx) } + } + + fn locked_data(&self) -> &UnsafeCell { + &self.data + } +} + +/// Safely initialises a [`RawSpinLock`] with the given name, generating a new lock class. +#[macro_export] +macro_rules! rawspinlock_init { + ($spinlock:expr, $name:literal) => { + $crate::init_with_lockdep!($spinlock, $name) + }; +} + +/// Exposes the kernel's [`raw_spinlock_t`]. +/// +/// It is very similar to [`SpinLock`], except that it is guaranteed not to sleep even on RT +/// variants of the kernel. +/// +/// # Examples +/// +/// ``` +/// # use kernel::sync::RawSpinLock; +/// # use core::pin::Pin; +/// +/// struct Example { +/// a: u32, +/// b: u32, +/// } +/// +/// // Function that acquires the raw spinlock without changing interrupt state. +/// fn lock_example(value: &RawSpinLock) { +/// let mut guard = value.lock(); +/// guard.a = 10; +/// guard.b = 20; +/// } +/// +/// // Function that acquires the raw spinlock and disables interrupts while holding it. +/// fn lock_irqdisable_example(value: &RawSpinLock) { +/// let mut guard = value.lock_irqdisable(); +/// guard.a = 30; +/// guard.b = 40; +/// } +/// +/// // Initialises a raw spinlock and calls the example functions. +/// fn spinlock_example() { +/// // SAFETY: `rawspinlock_init` is called below. +/// let mut value = unsafe { RawSpinLock::new(Example { a: 1, b: 2 }) }; +/// // SAFETY: We don't move `value`. +/// kernel::rawspinlock_init!(unsafe { Pin::new_unchecked(&mut value) }, "value"); +/// lock_example(&value); +/// lock_irqdisable_example(&value); +/// } +/// ``` +/// +/// [`raw_spinlock_t`]: ../../../include/linux/spinlock.h +pub struct RawSpinLock { + spin_lock: Opaque, + + // Spinlocks are architecture-defined. So we conservatively require them to be pinned in case + // some architecture uses self-references now or in the future. + _pin: PhantomPinned, + + data: UnsafeCell, +} + +// SAFETY: `RawSpinLock` can be transferred across thread boundaries iff the data it protects can. +unsafe impl Send for RawSpinLock {} + +// SAFETY: `RawSpinLock` serialises the interior mutability it provides, so it is `Sync` as long as +// the data it protects is `Send`. +unsafe impl Sync for RawSpinLock {} + +impl RawSpinLock { + /// Constructs a new raw spinlock. + /// + /// # Safety + /// + /// The caller must call [`RawSpinLock::init_lock`] before using the raw spinlock. + pub const unsafe fn new(t: T) -> Self { + Self { + spin_lock: Opaque::uninit(), + data: UnsafeCell::new(t), + _pin: PhantomPinned, + } + } +} + +impl RawSpinLock { + /// Locks the raw spinlock and gives the caller access to the data protected by it. Only one + /// thread at a time is allowed to access the protected data. + pub fn lock(&self) -> Guard<'_, Self, WriteLock> { + let ctx = >::lock_noguard(self); + // SAFETY: The raw spinlock was just acquired. + unsafe { Guard::new(self, ctx) } + } + + /// Locks the raw spinlock and gives the caller access to the data protected by it. + /// Additionally it disables interrupts (if they are enabled). + /// + /// When the lock in unlocked, the interrupt state (enabled/disabled) is restored. + pub fn lock_irqdisable(&self) -> Guard<'_, Self, DisabledInterrupts> { + let ctx = >::lock_noguard(self); + // SAFETY: The raw spinlock was just acquired. 
+ unsafe { Guard::new(self, ctx) }
+ }
+}
+
+impl<T> LockFactory for RawSpinLock<T> {
+ type LockedType<U> = RawSpinLock<U>;
+
+ unsafe fn new_lock<U>(data: U) -> RawSpinLock<U> {
+ // SAFETY: The safety requirements of `new_lock` also require that `init_lock` be called.
+ unsafe { RawSpinLock::new(data) }
+ }
+}
+
+impl<T> LockIniter for RawSpinLock<T> {
+ unsafe fn init_lock(
+ self: Pin<&mut Self>,
+ name: &'static CStr,
+ key: *mut bindings::lock_class_key,
+ ) {
+ unsafe { bindings::_raw_spin_lock_init(self.spin_lock.get(), name.as_char_ptr(), key) };
+ }
+}
+
+// SAFETY: The underlying kernel `raw_spinlock_t` object ensures mutual exclusion.
+unsafe impl<T: ?Sized> Lock for RawSpinLock<T> {
+ type Inner = T;
+ type GuardContext = EmptyGuardContext;
+
+ fn lock_noguard(&self) -> EmptyGuardContext {
+ // SAFETY: `spin_lock` points to valid memory.
+ unsafe { bindings::raw_spin_lock(self.spin_lock.get()) };
+ EmptyGuardContext
+ }
+
+ unsafe fn unlock(&self, _: &mut EmptyGuardContext) {
+ // SAFETY: The safety requirements of the function ensure that the raw spinlock is owned by
+ // the caller.
+ unsafe { bindings::raw_spin_unlock(self.spin_lock.get()) };
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<T> {
+ &self.data
+ }
+}
+
+// SAFETY: The underlying kernel `raw_spinlock_t` object ensures mutual exclusion.
+unsafe impl<T: ?Sized> Lock<DisabledInterrupts> for RawSpinLock<T> {
+ type Inner = T;
+ type GuardContext = c_types::c_ulong;
+
+ fn lock_noguard(&self) -> c_types::c_ulong {
+ // SAFETY: `spin_lock` points to valid memory.
+ unsafe { bindings::raw_spin_lock_irqsave(self.spin_lock.get()) }
+ }
+
+ unsafe fn unlock(&self, ctx: &mut c_types::c_ulong) {
+ // SAFETY: The safety requirements of the function ensure that the raw spinlock is owned by
+ // the caller.
+ unsafe { bindings::raw_spin_unlock_irqrestore(self.spin_lock.get(), *ctx) };
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<T> {
+ &self.data
+ }
+}
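
For reference, a minimal usage sketch of the plain `RwSemaphore` (the diff above only shows an example for its revocable specialisation). The `Example` struct and the `"example::sem"` lock-class name are illustrative; the rest follows the same pattern as the `SpinLock` examples in the diff:

```
# use kernel::sync::RwSemaphore;
# use core::pin::Pin;
struct Example {
    a: u32,
    b: u32,
}

// SAFETY: `rwsemaphore_init` is called below.
let mut sem = unsafe { RwSemaphore::new(Example { a: 10, b: 20 }) };
// SAFETY: We never move out of `sem`.
kernel::rwsemaphore_init!(unsafe { Pin::new_unchecked(&mut sem) }, "example::sem");

// Any number of readers may access the data concurrently.
assert_eq!(sem.read().a, 10);

// A writer gets exclusive access and may mutate the data.
sem.write().a = 30;
assert_eq!(sem.read().a, 30);
```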