
[v2,5/8] fs: lockless mntns lookup for nsfs

Message ID 20241212-work-mount-rbtree-lockless-v2-5-4fe6cef02534@kernel.org (mailing list archive)
State New
Series: fs: lockless mntns lookup

Commit Message

Christian Brauner Dec. 12, 2024, 11:56 a.m. UTC
We already made the rbtree lookup lockless for the simple lookup case.
However, walking the list of mount namespaces via nsfs still takes the
read lock, pointlessly blocking concurrent additions of new mount
namespaces. Such additions are rare anyway, so allow lockless lookup of
the previous and next mount namespace by keeping a separate list:
unlike rb_prev() and rb_next(), a linked list can be walked safely
under RCU. This also allows some simplifications in the code.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
 fs/mount.h     | 17 +++++++----------
 fs/namespace.c | 34 +++++++++++++++++++++-------------
 fs/nsfs.c      |  5 +----
 3 files changed, 29 insertions(+), 27 deletions(-)
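
For context, this lookup backs the NS_MNT_GET_NEXT/NS_MNT_GET_PREV nsfs
ioctls added earlier in this series. Below is a minimal userspace sketch
of the walk they enable, starting from the caller's own mount namespace
(assuming the struct mnt_ns_info uapi from those patches; error handling
trimmed for brevity):

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/nsfs.h>

  int main(void)
  {
  	struct mnt_ns_info info = {};
  	int fd = open("/proc/self/ns/mnt", O_RDONLY);

  	for (;;) {
  		/* Returns an fd for the next mount namespace the caller
  		 * is privileged over; fails with ENOENT at the end. */
  		int next = ioctl(fd, NS_MNT_GET_NEXT, &info);
  		if (next < 0)
  			break;
  		printf("mnt_ns_id %llu: %u mounts\n",
  		       (unsigned long long)info.mnt_ns_id, info.nr_mounts);
  		close(fd);
  		fd = next;
  	}
  	close(fd);
  	return 0;
  }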

Comments

Peter Zijlstra Dec. 12, 2024, 12:48 p.m. UTC | #1
On Thu, Dec 12, 2024 at 12:56:04PM +0100, Christian Brauner wrote:

> @@ -146,6 +147,7 @@ static void mnt_ns_tree_add(struct mnt_namespace *ns)
>  
>  	mnt_ns_tree_write_lock();
>  	node = rb_find_add_rcu(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_cmp);
> +	list_add_tail_rcu(&ns->mnt_ns_list, &mnt_ns_list);
>  	mnt_ns_tree_write_unlock();

This only works if the entries are inserted in order -- if not, you can
do something like:

  prev = rb_prev(&ns->mnt_ns_tree_node);
  if (!prev) {
    // no previous, add to head
    list_add(&ns->mnt_ns_list, &mnt_ns_list); 
  } else {
    // add after the previous tree node
    prev_ns = container_of(prev, struct mnt_namespace, mnt_ns_tree_node);
    list_add(&ns->mnt_ns_list, &prev_ns->mnt_ns_list);
  }
Christian Brauner Dec. 12, 2024, 5:33 p.m. UTC | #2
On Thu, Dec 12, 2024 at 01:48:17PM +0100, Peter Zijlstra wrote:
> On Thu, Dec 12, 2024 at 12:56:04PM +0100, Christian Brauner wrote:
> 
> > @@ -146,6 +147,7 @@ static void mnt_ns_tree_add(struct mnt_namespace *ns)
> >  
> >  	mnt_ns_tree_write_lock();
> >  	node = rb_find_add_rcu(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_cmp);
> > +	list_add_tail_rcu(&ns->mnt_ns_list, &mnt_ns_list);
> >  	mnt_ns_tree_write_unlock();
> 
> This only works if the entries are inserted in order -- if not, you can
> do something like:

If I understand your concern correctly then the entries should always be
inserted in order. Mount namespace allocation is serialized on the
namespace semaphore, namespace_sem, so each mount namespace receives a
unique 64-bit sequence number. If ten mount namespaces are created with
sequence numbers 1, 2, 3, ..., 10 then they are inserted into the rbtree
in that order, and so they should be added to the list in the same
order. That's why I kept it that simple.

Although I need to drop "fs: add mount namespace to rbtree late" to keep
that guarantee. Did I understand you correctly?

> 
>   prev = rb_prev(&ns->mnt_ns_tree_node);
>   if (!prev) {
>     // no previous, add to head
>     list_add(&ns->mnt_ns_list, &mnt_ns_list); 
>   } else {
>     // add after the previous tree node
>     prev_ns = container_of(prev, struct mnt_namespace, mnt_ns_tree_node);
>     list_add(&ns->mnt_ns_list, &prev_ns->mnt_ns_list);
>   }
Christian Brauner Dec. 12, 2024, 6:24 p.m. UTC | #3
> > This only works if the entries are inserted in order -- if not, you can
> > do something like:
> 
> If I understand your concern correctly then the entries should always be
> inserted in order. Mount namespace allocation is serialized on the
> namespace semaphore, namespace_sem, so each mount namespace receives a
> unique 64-bit sequence number. If ten mount namespaces are created with
> sequence numbers 1, 2, 3, ..., 10 then they are inserted into the rbtree
> in that order, and so they should be added to the list in the same
> order. That's why I kept it that simple.

Nope, you were right, I was wrong. We allocate the sequence number
before we hold the namespace semaphore. I misremembered that code.
I'll fix this up as you suggested. Thanks!
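
For completeness, here is a sketch of what the fixed-up insertion might
look like, pieced together from Peter's suggestion and the patch below
(illustrative only, not necessarily the code that eventually landed):

  /*
   * The _rcu list primitive is needed because lookups no longer take
   * mnt_ns_tree_lock; list_add_rcu() links the new entry after the
   * predecessor, whereas list_add_tail() would link it before.
   */
  static void mnt_ns_tree_add(struct mnt_namespace *ns)
  {
  	struct rb_node *node, *prev;

  	mnt_ns_tree_write_lock();
  	node = rb_find_add_rcu(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_cmp);
  	if (!node) {
  		/*
  		 * The rbtree is ordered by sequence number, so the tree
  		 * predecessor is also the list predecessor.
  		 */
  		prev = rb_prev(&ns->mnt_ns_tree_node);
  		if (!prev)
  			list_add_rcu(&ns->mnt_ns_list, &mnt_ns_list);
  		else
  			list_add_rcu(&ns->mnt_ns_list,
  				     &node_to_mnt_ns(prev)->mnt_ns_list);
  	}
  	mnt_ns_tree_write_unlock();

  	WARN_ON_ONCE(node);
  }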

Patch

diff --git a/fs/mount.h b/fs/mount.h
index 3c3763d8ae821d6a117c528808dbc94d0251f964..b7edb4034c2131b758f953cefbf47d060e27e03a 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -17,7 +17,10 @@  struct mnt_namespace {
 	unsigned int		nr_mounts; /* # of mounts in the namespace */
 	unsigned int		pending_mounts;
 	union {
-		struct rb_node	mnt_ns_tree_node; /* node in the mnt_ns_tree */
+		struct {
+			struct list_head	mnt_ns_list;
+			struct rb_node		mnt_ns_tree_node; /* node in the mnt_ns_tree */
+		};
 		struct rcu_head mnt_ns_rcu;
 	};
 	refcount_t		passive; /* number references not pinning @mounts */
@@ -157,15 +160,9 @@  static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list)
 }
 
 bool has_locked_children(struct mount *mnt, struct dentry *dentry);
-struct mnt_namespace *__lookup_next_mnt_ns(struct mnt_namespace *mnt_ns, bool previous);
-static inline struct mnt_namespace *lookup_next_mnt_ns(struct mnt_namespace *mntns)
-{
-	return __lookup_next_mnt_ns(mntns, false);
-}
-static inline struct mnt_namespace *lookup_prev_mnt_ns(struct mnt_namespace *mntns)
-{
-	return __lookup_next_mnt_ns(mntns, true);
-}
+struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mnt_ns,
+					    bool previous);
+
 static inline struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
 {
 	return container_of(ns, struct mnt_namespace, ns);
diff --git a/fs/namespace.c b/fs/namespace.c
index 9463b9ab95f0a5db32cfe5fc5564d7f25ce3e06f..a5e1b166be9430d47c295159292cb9028b2e2339 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -82,6 +82,7 @@  static DEFINE_RWLOCK(mnt_ns_tree_lock);
 static seqcount_rwlock_t mnt_ns_tree_seqcount = SEQCNT_RWLOCK_ZERO(mnt_ns_tree_seqcount, &mnt_ns_tree_lock);
 
 static struct rb_root mnt_ns_tree = RB_ROOT; /* protected by mnt_ns_tree_lock */
+static LIST_HEAD(mnt_ns_list); /* protected by mnt_ns_tree_lock */
 
 struct mount_kattr {
 	unsigned int attr_set;
@@ -146,6 +147,7 @@  static void mnt_ns_tree_add(struct mnt_namespace *ns)
 
 	mnt_ns_tree_write_lock();
 	node = rb_find_add_rcu(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_cmp);
+	list_add_tail_rcu(&ns->mnt_ns_list, &mnt_ns_list);
 	mnt_ns_tree_write_unlock();
 
 	WARN_ON_ONCE(node);
@@ -177,6 +179,7 @@  static void mnt_ns_tree_remove(struct mnt_namespace *ns)
 	if (!is_anon_ns(ns)) {
 		mnt_ns_tree_write_lock();
 		rb_erase(&ns->mnt_ns_tree_node, &mnt_ns_tree);
+		list_bidir_del_rcu(&ns->mnt_ns_list);
 		mnt_ns_tree_write_unlock();
 	}
 
@@ -2091,30 +2094,34 @@  struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
 	return &mnt->ns;
 }
 
-struct mnt_namespace *__lookup_next_mnt_ns(struct mnt_namespace *mntns, bool previous)
+struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mntns, bool previous)
 {
-	guard(read_lock)(&mnt_ns_tree_lock);
-	for (;;) {
-		struct rb_node *node;
+	struct list_head *list;
+
+	guard(rcu)();
 
+	for (;;) {
 		if (previous)
-			node = rb_prev(&mntns->mnt_ns_tree_node);
+			list = rcu_dereference(list_bidir_prev_rcu(&mntns->mnt_ns_list));
 		else
-			node = rb_next(&mntns->mnt_ns_tree_node);
-		if (!node)
+			list = rcu_dereference(list_next_rcu(&mntns->mnt_ns_list));
+		if (list_is_head(list, &mnt_ns_list))
 			return ERR_PTR(-ENOENT);
 
-		mntns = node_to_mnt_ns(node);
-		node = &mntns->mnt_ns_tree_node;
+		mntns = list_entry_rcu(list, struct mnt_namespace, mnt_ns_list);
 
+		/*
+		 * The last passive reference count is put with RCU
+		 * delay, so accessing the mount namespace is not just
+		 * safe, its members are all still valid.
+		 */
 		if (!ns_capable_noaudit(mntns->user_ns, CAP_SYS_ADMIN))
 			continue;
 
 		/*
-		 * Holding mnt_ns_tree_lock prevents the mount namespace from
-		 * being freed but it may well be on it's deathbed. We want an
-		 * active reference, not just a passive one here as we're
-		 * persisting the mount namespace.
+		 * We need an active reference count as we're persisting
+		 * the mount namespace and it might already be on its
+		 * deathbed.
 		 */
 		if (!refcount_inc_not_zero(&mntns->ns.count))
 			continue;
@@ -3931,6 +3938,7 @@  static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool a
 	refcount_set(&new_ns->ns.count, 1);
 	refcount_set(&new_ns->passive, 1);
 	new_ns->mounts = RB_ROOT;
+	INIT_LIST_HEAD(&new_ns->mnt_ns_list);
 	RB_CLEAR_NODE(&new_ns->mnt_ns_tree_node);
 	init_waitqueue_head(&new_ns->poll);
 	new_ns->user_ns = get_user_ns(user_ns);
diff --git a/fs/nsfs.c b/fs/nsfs.c
index c675fc40ce2dc674f0dafce5c4924b910a73a23f..663f8656158d52d391ba80ef1d320197d3d654e0 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -274,10 +274,7 @@  static long ns_ioctl(struct file *filp, unsigned int ioctl,
 		if (usize < MNT_NS_INFO_SIZE_VER0)
 			return -EINVAL;
 
-		if (previous)
-			mnt_ns = lookup_prev_mnt_ns(to_mnt_ns(ns));
-		else
-			mnt_ns = lookup_next_mnt_ns(to_mnt_ns(ns));
+		mnt_ns = get_sequential_mnt_ns(to_mnt_ns(ns), previous);
 		if (IS_ERR(mnt_ns))
 			return PTR_ERR(mnt_ns);
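
As a closing note, the reader side in get_sequential_mnt_ns() above is
an instance of a standard RCU lookup pattern: RCU keeps the object's
memory valid for the duration of the read-side critical section, but the
object itself may already be dying, so handing it out requires upgrading
to an active reference first. A generic distillation (struct obj and
global_head are illustrative names, not kernel API):

  #include <linux/rcupdate.h>
  #include <linux/refcount.h>

  struct obj {
  	refcount_t ref;
  	struct rcu_head rcu;	/* freed via call_rcu() after removal */
  };

  static struct obj __rcu *global_head;	/* illustrative only */

  static struct obj *obj_lookup(void)
  {
  	struct obj *o;

  	rcu_read_lock();
  	o = rcu_dereference(global_head);	/* memory pinned by RCU */
  	if (o && !refcount_inc_not_zero(&o->ref))
  		o = NULL;			/* caught it mid-teardown */
  	rcu_read_unlock();

  	return o;	/* NULL, or pinned beyond the RCU section */
  }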