@@ -203,6 +203,22 @@ struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
}
EXPORT_SYMBOL_GPL(rust_helper_alloc_pages);

+unsigned long rust_helper_spin_lock_irqsave(spinlock_t *lock)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+
+	return flags;
+}
+EXPORT_SYMBOL_GPL(rust_helper_spin_lock_irqsave);
+
+void rust_helper_spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+	spin_unlock_irqrestore(lock, flags);
+}
+EXPORT_SYMBOL_GPL(rust_helper_spin_unlock_irqrestore);
+
/*
* We use `bindgen`'s `--size_t-is-usize` option to bind the C `size_t` type
* as the Rust `usize` type, so we can use it in contexts where Rust
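These helpers are needed because spin_lock_irqsave() and spin_unlock_irqrestore() are function-like macros on the C side, so bindgen cannot generate bindings for them directly; wrapping them in exported rust_helper_* functions gives the Rust side a real symbol to call, and the rust_helper_ prefix is stripped when the bindings are generated, which is why the Rust code further down calls bindings::spin_lock_irqsave() and bindings::spin_unlock_irqrestore(). A minimal sketch of the resulting call shape, assuming only a hypothetical, already-initialised raw spinlock pointer (this is an illustration, not code from the patch):

    // Sketch: raw use of the new helper bindings, mirroring the C idiom
    //   unsigned long flags; spin_lock_irqsave(lock, flags); ...
    //   spin_unlock_irqrestore(lock, flags);
    unsafe fn with_raw_spinlock_irqsave(lock: *mut bindings::spinlock_t) {
        // SAFETY: the caller guarantees `lock` is valid and initialised.
        let flags: core::ffi::c_ulong = unsafe { bindings::spin_lock_irqsave(lock) };

        // ... access the data protected by `lock`; local interrupts are disabled ...

        // SAFETY: the lock was acquired above and `flags` is the state it returned.
        unsafe { bindings::spin_unlock_irqrestore(lock, flags) };
    }

In practice, driver code is expected to go through the SpinLock abstraction changed below; the raw bindings are internal plumbing for the kernel crate.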
@@ -64,6 +64,8 @@ macro_rules! new_spinlock {
/// assert_eq!(e.c, 10);
/// assert_eq!(e.d.lock().a, 20);
/// assert_eq!(e.d.lock().b, 30);
+/// assert_eq!(e.d.lock_irqsave().a, 20);
+/// assert_eq!(e.d.lock_irqsave().b, 30);
/// ```
///
/// The following example shows how to use interior mutability to modify the contents of a struct
@@ -81,6 +83,12 @@ macro_rules! new_spinlock {
///     let mut guard = m.lock();
///     guard.a += 10;
///     guard.b += 20;
/// }
+///
+/// fn example2(m: &SpinLock<Example>) {
+///     let mut guard = m.lock_irqsave();
+///     guard.a += 10;
+///     guard.b += 20;
+/// }
/// ```
///
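As with lock(), the guard returned by lock_irqsave() releases the lock when it goes out of scope; on this path the drop additionally restores the interrupt state saved at acquisition, which is what the backend changes below implement. A small sketch in the style of the doc examples, reusing their Example type with u32 fields a and b (example3 is a made-up name):

    fn example3(m: &SpinLock<Example>) -> u32 {
        let sum;
        {
            // The lock is taken with local interrupts disabled and the previous
            // interrupt state saved inside the guard.
            let guard = m.lock_irqsave();
            sum = guard.a + guard.b;
        } // Guard dropped here: lock released, saved interrupt state restored.
        sum
    }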
@@ -94,7 +102,7 @@ pub struct SpinLockBackend;
// default implementation that always calls the same locking method.
unsafe impl super::Backend for SpinLockBackend {
type State = CachePadded<bindings::spinlock_t>;
- type GuardState = ();
+    type GuardState = Option<core::ffi::c_ulong>;

unsafe fn init(
ptr: *mut Self::State,
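The GuardState change above is the core of the design: whatever value the acquisition routine returns is stored in the guard and handed back to unlock(), so a single unlock() implementation can mirror whichever path took the lock (None for a plain lock(), Some(flags) for the irqsave variant, as the next hunk shows). A standalone, userspace toy of the same pattern; ToySpinLock and every name in it are invented for illustration and are not kernel code:

    use std::sync::atomic::{AtomicBool, Ordering};

    // None => taken with `lock()`; Some(flags) => taken with `lock_irqsave()`.
    type ToyGuardState = Option<u64>;

    struct ToySpinLock {
        taken: AtomicBool,
    }

    impl ToySpinLock {
        const fn new() -> Self {
            Self { taken: AtomicBool::new(false) }
        }

        fn lock(&self) -> ToyGuardState {
            while self.taken.swap(true, Ordering::Acquire) {} // spin
            None
        }

        fn lock_irqsave(&self) -> ToyGuardState {
            while self.taken.swap(true, Ordering::Acquire) {} // spin
            Some(0) // stand-in for the saved interrupt flags
        }

        fn unlock(&self, state: ToyGuardState) {
            match state {
                // Kernel equivalent: spin_unlock_irqrestore(lock, flags).
                Some(_flags) => { /* restore the saved interrupt state here */ }
                // Kernel equivalent: spin_unlock(lock).
                None => {}
            }
            self.taken.store(false, Ordering::Release);
        }
    }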
@@ -110,13 +118,32 @@ unsafe impl super::Backend for SpinLockBackend {
unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
// SAFETY: The safety requirements of this function ensure that `ptr` points to valid
// memory, and that it has been initialised before.
- unsafe { bindings::spin_lock((&mut *ptr).deref_mut()) }
+        unsafe { bindings::spin_lock((&mut *ptr).deref_mut()) };
+        None
}
- #[inline(always)]
- unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
- // SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
- // caller is the owner of the mutex.
- unsafe { bindings::spin_unlock((&mut *ptr).deref_mut()) }
+    unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState) {
+        match guard_state {
+            // SAFETY: The safety requirements of this function ensure that `ptr` is valid and that
+            // the caller is the owner of the lock.
+            Some(flags) => unsafe {
+                bindings::spin_unlock_irqrestore((&mut *ptr).deref_mut(), *flags)
+            },
+            // SAFETY: The safety requirements of this function ensure that `ptr` is valid and that
+            // the caller is the owner of the lock.
+            None => unsafe { bindings::spin_unlock((&mut *ptr).deref_mut()) },
+        }
+    }
+}
+
+// SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion. We use the `irqsave`
+// variant of the C lock acquisition functions to disable interrupts and retrieve the original
+// interrupt state, and the `irqrestore` variant of the lock release functions to restore the state
+// in `unlock` -- we use the guard context to determine which method was used to acquire the lock.
+unsafe impl super::IrqSaveBackend for SpinLockBackend {
+    unsafe fn lock_irqsave(ptr: *mut Self::State) -> Self::GuardState {
+        // SAFETY: The safety requirements of this function ensure that `ptr` points to valid
+        // memory, and that it has been initialised before.
+        Some(unsafe { bindings::spin_lock_irqsave((&mut *ptr).deref_mut()) })
}
}
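With both impls in place, a SpinLock user can pick the acquisition path per call site: the plain lock() is enough when the protected data is never touched from interrupt context, while lock_irqsave() is the right choice when the same lock is also taken from an interrupt handler, since keeping local interrupts enabled inside the critical section could let the handler run and spin forever on a lock the interrupted code already holds. A sketch of how a driver might use the new method, assuming lock_irqsave() is exposed on SpinLock through the IrqSaveBackend bound (the doc examples above already call it); Stats and both function names are hypothetical:

    use kernel::sync::SpinLock;

    struct Stats {
        rx_packets: u64,
    }

    // Fine when the data is never touched from interrupt context.
    fn count_from_task(stats: &SpinLock<Stats>) {
        let mut guard = stats.lock();
        guard.rx_packets += 1;
    }

    // Needed when the same lock is also taken from the local interrupt handler:
    // interrupts stay disabled while the guard is held, so the handler cannot
    // interrupt the critical section and spin on a lock that is already held.
    fn count_shared_with_irq(stats: &SpinLock<Stats>) {
        let mut guard = stats.lock_irqsave();
        guard.rx_packets += 1;
    } // Dropping the guard unlocks and restores the saved interrupt state.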