[v8,4/7] mm: rust: add lock_vma_under_rcu

Message ID 20241120-vma-v8-4-eb31425da66b@google.com
State New
Series Rust support for mm_struct, vm_area_struct, and mmap

Commit Message

Alice Ryhl Nov. 20, 2024, 2:49 p.m. UTC
All of Rust Binder's existing calls to `vm_insert_page` could be
optimized to first attempt to use `lock_vma_under_rcu`. This patch
provides an abstraction to enable that.
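
For illustration, the intended call pattern might look roughly like the
sketch below (`use_vma` is a placeholder, and the `vma_lookup` method on
the mmap read guard is assumed from an earlier patch in this series):

    if let Some(vma) = mm.lock_vma_under_rcu(addr) {
        // Fast path: only the VMA read lock is held here.
        return use_vma(&vma);
    }
    // Slow path: fall back to the mmap read lock.
    let mmap = mm.mmap_read_lock();
    let vma = mmap.vma_lookup(addr).ok_or(EFAULT)?;
    use_vma(vma)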

Signed-off-by: Alice Ryhl <aliceryhl@google.com>
---
 rust/helpers/mm.c |  5 +++++
 rust/kernel/mm.rs | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 61 insertions(+)

Comments

Lorenzo Stoakes Nov. 20, 2024, 7:29 p.m. UTC | #1
On Wed, Nov 20, 2024 at 02:49:58PM +0000, Alice Ryhl wrote:
> All of Rust Binder's existing calls to `vm_insert_page` could be
> optimized to first attempt to use `lock_vma_under_rcu`. This patch
> provides an abstraction to enable that.

I think there should be a blurb about what the VMA locks are, how they avoid
contention on the mmap read lock etc. before talking about a use case (though
it's useful to mention the motivating reason!)

>
> Signed-off-by: Alice Ryhl <aliceryhl@google.com>
> ---
>  rust/helpers/mm.c |  5 +++++
>  rust/kernel/mm.rs | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 61 insertions(+)
>
> diff --git a/rust/helpers/mm.c b/rust/helpers/mm.c
> index 7b72eb065a3e..81b510c96fd2 100644
> --- a/rust/helpers/mm.c
> +++ b/rust/helpers/mm.c
> @@ -43,3 +43,8 @@ struct vm_area_struct *rust_helper_vma_lookup(struct mm_struct *mm,
>  {
>  	return vma_lookup(mm, addr);
>  }
> +
> +void rust_helper_vma_end_read(struct vm_area_struct *vma)
> +{
> +	vma_end_read(vma);
> +}
> diff --git a/rust/kernel/mm.rs b/rust/kernel/mm.rs
> index ace8e7d57afe..a15acb546f68 100644
> --- a/rust/kernel/mm.rs
> +++ b/rust/kernel/mm.rs
> @@ -13,6 +13,7 @@
>  use core::{ops::Deref, ptr::NonNull};
>
>  pub mod virt;
> +use virt::VmAreaRef;
>
>  /// A wrapper for the kernel's `struct mm_struct`.
>  ///
> @@ -170,6 +171,32 @@ pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a MmWithUser {
>          unsafe { &*ptr.cast() }
>      }
>
> +    /// Try to lock the vma read lock under rcu.

This reads oddly; I'd say 'try to acquire the VMA read lock'. It's not
really necessary to mention RCU here: while lock_vma_under_rcu() takes the
RCU read lock in order to try to acquire the VMA read lock, it releases it
afterwards, and you then hold the VMA read lock until you are done with it,
without needing to hold an RCU lock.

A reader might otherwise be confused and think an RCU read lock is required
to be held throughout too, which isn't the case (this is maybe a critique of
the name of the function too, sorry Suren :P).

> +    ///
> +    /// If this operation fails, the vma may still exist. In that case, you should take the mmap
> +    /// read lock and try to use `vma_lookup` instead.

This also reads oddly: you're more likely to have failed due to lock
contention (assuming you are not arbitrarily trying to acquire a lock on an
address that is likely to be unmapped soon).

So I'd say 'this is an optimistic try lock operation, so it may fail, in which
case you should fall back to taking the mmap read lock'.

I'm not sure it's necessary to reference vma_lookup() either, especially as in
future versions of this code we might want to use a VMA iterator instead.
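
Concretely, the suggested wording might look like this (just a sketch, not
the final text):

    /// Try to acquire the VMA read lock.
    ///
    /// This is an optimistic trylock operation, so it may fail if there is
    /// contention. In that case, you should fall back to taking the mmap
    /// read lock. When per-VMA locks are disabled, this always returns
    /// `None`.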

> +    ///
> +    /// When per-vma locks are disabled, this always returns `None`.
> +    #[inline]
> +    pub fn lock_vma_under_rcu(&self, vma_addr: usize) -> Option<VmaReadGuard<'_>> {

Ah I love having lock guards available... Something I miss from C++ :>)

> +        #[cfg(CONFIG_PER_VMA_LOCK)]

Ah interesting, so we have an abstraction for kernel config operations!

> +        {
> +            // SAFETY: Calling `bindings::lock_vma_under_rcu` is always okay given an mm where
> +            // `mm_users` is non-zero.
> +            let vma = unsafe { bindings::lock_vma_under_rcu(self.as_raw(), vma_addr as _) };
> +            if !vma.is_null() {
> +                return Some(VmaReadGuard {
> +                    // SAFETY: If `lock_vma_under_rcu` returns a non-null ptr, then it points at a
> +                    // valid vma. The vma is stable for as long as the vma read lock is held.
> +                    vma: unsafe { VmAreaRef::from_raw(vma) },
> +                    _nts: NotThreadSafe,
> +                });
> +            }
> +        }
> +
> +        None
> +    }
> +
>      /// Lock the mmap read lock.
>      #[inline]
>      pub fn mmap_read_lock(&self) -> MmapReadGuard<'_> {
> @@ -238,3 +265,32 @@ fn drop(&mut self) {
>          unsafe { bindings::mmap_read_unlock(self.mm.as_raw()) };
>      }
>  }
> +
> +/// A guard for the vma read lock.
> +///
> +/// # Invariants
> +///
> +/// This `VmaReadGuard` guard owns the vma read lock.
> +pub struct VmaReadGuard<'a> {
> +    vma: &'a VmAreaRef,
> +    // `vma_end_read` must be called on the same thread as where the lock was taken
> +    _nts: NotThreadSafe,
> +}
> +
> +// Make all `VmAreaRef` methods available on `VmaReadGuard`.
> +impl Deref for VmaReadGuard<'_> {
> +    type Target = VmAreaRef;
> +
> +    #[inline]
> +    fn deref(&self) -> &VmAreaRef {
> +        self.vma
> +    }
> +}
> +
> +impl Drop for VmaReadGuard<'_> {
> +    #[inline]
> +    fn drop(&mut self) {
> +        // SAFETY: We hold the read lock by the type invariants.
> +        unsafe { bindings::vma_end_read(self.vma.as_ptr()) };

Extremely nice to know it is _guaranteed_ this will eventually be called,
and that we can be sure the VMA is valid by the fact that we already hold
the lock, etc.

Selling me on this rust thing here... ;)

> +    }
> +}
>
> --
> 2.47.0.371.ga323438b13-goog
>

Patch

diff --git a/rust/helpers/mm.c b/rust/helpers/mm.c
index 7b72eb065a3e..81b510c96fd2 100644
--- a/rust/helpers/mm.c
+++ b/rust/helpers/mm.c
@@ -43,3 +43,8 @@  struct vm_area_struct *rust_helper_vma_lookup(struct mm_struct *mm,
 {
 	return vma_lookup(mm, addr);
 }
+
+void rust_helper_vma_end_read(struct vm_area_struct *vma)
+{
+	vma_end_read(vma);
+}
diff --git a/rust/kernel/mm.rs b/rust/kernel/mm.rs
index ace8e7d57afe..a15acb546f68 100644
--- a/rust/kernel/mm.rs
+++ b/rust/kernel/mm.rs
@@ -13,6 +13,7 @@ 
 use core::{ops::Deref, ptr::NonNull};
 
 pub mod virt;
+use virt::VmAreaRef;
 
 /// A wrapper for the kernel's `struct mm_struct`.
 ///
@@ -170,6 +171,32 @@  pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a MmWithUser {
         unsafe { &*ptr.cast() }
     }
 
+    /// Try to lock the vma read lock under rcu.
+    ///
+    /// If this operation fails, the vma may still exist. In that case, you should take the mmap
+    /// read lock and try to use `vma_lookup` instead.
+    ///
+    /// When per-vma locks are disabled, this always returns `None`.
+    #[inline]
+    pub fn lock_vma_under_rcu(&self, vma_addr: usize) -> Option<VmaReadGuard<'_>> {
+        #[cfg(CONFIG_PER_VMA_LOCK)]
+        {
+            // SAFETY: Calling `bindings::lock_vma_under_rcu` is always okay given an mm where
+            // `mm_users` is non-zero.
+            let vma = unsafe { bindings::lock_vma_under_rcu(self.as_raw(), vma_addr as _) };
+            if !vma.is_null() {
+                return Some(VmaReadGuard {
+                    // SAFETY: If `lock_vma_under_rcu` returns a non-null ptr, then it points at a
+                    // valid vma. The vma is stable for as long as the vma read lock is held.
+                    vma: unsafe { VmAreaRef::from_raw(vma) },
+                    _nts: NotThreadSafe,
+                });
+            }
+        }
+
+        None
+    }
+
     /// Lock the mmap read lock.
     #[inline]
     pub fn mmap_read_lock(&self) -> MmapReadGuard<'_> {
@@ -238,3 +265,32 @@  fn drop(&mut self) {
         unsafe { bindings::mmap_read_unlock(self.mm.as_raw()) };
     }
 }
+
+/// A guard for the vma read lock.
+///
+/// # Invariants
+///
+/// This `VmaReadGuard` guard owns the vma read lock.
+pub struct VmaReadGuard<'a> {
+    vma: &'a VmAreaRef,
+    // `vma_end_read` must be called on the same thread as where the lock was taken
+    _nts: NotThreadSafe,
+}
+
+// Make all `VmAreaRef` methods available on `VmaReadGuard`.
+impl Deref for VmaReadGuard<'_> {
+    type Target = VmAreaRef;
+
+    #[inline]
+    fn deref(&self) -> &VmAreaRef {
+        self.vma
+    }
+}
+
+impl Drop for VmaReadGuard<'_> {
+    #[inline]
+    fn drop(&mut self) {
+        // SAFETY: We hold the read lock by the type invariants.
+        unsafe { bindings::vma_end_read(self.vma.as_ptr()) };
+    }
+}
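
A quick illustration of the guard's RAII behavior (a sketch; `mm` is assumed
to be a `&MmWithUser`, and the `flags` accessor on `VmAreaRef` comes from
elsewhere in this series):

    if let Some(vma) = mm.lock_vma_under_rcu(vma_addr) {
        // `VmaReadGuard` derefs to `VmAreaRef`, so its methods are
        // available directly on the guard.
        let _flags = vma.flags();
        // The VMA read lock is released here when `vma` goes out of
        // scope: the `Drop` impl calls `vma_end_read` on this same
        // thread, as required by the `NotThreadSafe` marker.
    }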