
[1/2] super: massage wait event mechanism

Message ID: 20231127-vfs-super-massage-wait-v1-1-9ab277bfd01a@kernel.org (mailing list archive)
State: New, archived
Series: super: small tweaks

Commit Message

Christian Brauner Nov. 27, 2023, 11:51 a.m. UTC
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
 fs/super.c | 51 ++++++++++++++-------------------------------------
 1 file changed, 14 insertions(+), 37 deletions(-)

Comments

Christoph Hellwig Nov. 27, 2023, 1:59 p.m. UTC | #1
Can you explain why you're "massaging" things here?
Christian Brauner Nov. 27, 2023, 2:52 p.m. UTC | #2
On Mon, Nov 27, 2023 at 02:59:00PM +0100, Christoph Hellwig wrote:
> Can you explain why you're "massaging" things here?

Ah, I didn't update my commit message before sending it out:

"We're currently using two separate helpers wait_born() and wait_dead()
when we can just do it all in a single helper super_load_flags(). We're
also acquiring the lock before we check whether this superblock is even
a viable candidate. If it's already dying, we don't even need to bother
with the lock."

Is that alright?
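
In concrete terms, the consolidation described above reduces the two wait
predicates to one parameterized helper. A condensed sketch of the pattern,
paraphrasing the patch further down rather than quoting it verbatim:

	static bool super_load_flags(const struct super_block *sb, unsigned int flags)
	{
		/* Pairs with smp_store_release() in super_wake(). */
		return smp_load_acquire(&sb->s_flags) & flags;
	}

	/* was wait_born(sb): wait until the superblock is ready or dying */
	wait_var_event(&sb->s_flags, super_load_flags(sb, SB_BORN | SB_DYING));

	/* was wait_dead(sb): wait for SB_DEAD in grab_super() */
	wait_var_event(&sb->s_flags, super_load_flags(sb, SB_DEAD));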
Christoph Hellwig Nov. 27, 2023, 2:54 p.m. UTC | #3
On Mon, Nov 27, 2023 at 03:52:56PM +0100, Christian Brauner wrote:
> On Mon, Nov 27, 2023 at 02:59:00PM +0100, Christoph Hellwig wrote:
> > Can you explain why you're "massaging" things here?
> 
> Ah, I didn't update my commit message before sending it out:
> 
> "We're currently using two separate helpers wait_born() and wait_dead()
> when we can just do it all in a single helper super_load_flags(). We're
> also acquiring the lock before we check whether this superblock is even
> a viable candidate. If it's already dying, we don't even need to bother
> with the lock."
> 
> Is that alright?

Sounds good, but now I need to go back and cross-reference it with
what is actually in the patch :)
Christoph Hellwig Nov. 27, 2023, 4:46 p.m. UTC | #4
On Mon, Nov 27, 2023 at 12:51:30PM +0100, Christian Brauner wrote:
> Signed-off-by: Christian Brauner <brauner@kernel.org>
> ---
>  fs/super.c | 51 ++++++++++++++-------------------------------------
>  1 file changed, 14 insertions(+), 37 deletions(-)
> 
> diff --git a/fs/super.c b/fs/super.c
> index aa4e5c11ee32..f3227b22879d 100644
> --- a/fs/super.c
> +++ b/fs/super.c
> @@ -81,16 +81,13 @@ static inline void super_unlock_shared(struct super_block *sb)
>  	super_unlock(sb, false);
>  }
>  
> +static bool super_load_flags(const struct super_block *sb, unsigned int flags)
>  {
>  	/*
>  	 * Pairs with smp_store_release() in super_wake() and ensures
> +	 * that we see @flags after we're woken.
>  	 */
> +	return smp_load_acquire(&sb->s_flags) & flags;

I find the name for this helper very confusing.  Yes, eventually it
is clear that the load maps to a load instruction with acquire semantics
here, but it's really unusual naming, I think.  Unfortunately I can't
offer a better one either.

Otherwise this looks good except for the fact that I really hate
code using smp_load_acquire / smp_store_release directly because of
all the mental load it causes.
Christian Brauner Nov. 28, 2023, 3:56 p.m. UTC | #5
On Mon, Nov 27, 2023 at 05:46:37PM +0100, Christoph Hellwig wrote:
> On Mon, Nov 27, 2023 at 12:51:30PM +0100, Christian Brauner wrote:
> > Signed-off-by: Christian Brauner <brauner@kernel.org>
> > ---
> >  fs/super.c | 51 ++++++++++++++-------------------------------------
> >  1 file changed, 14 insertions(+), 37 deletions(-)
> > 
> > diff --git a/fs/super.c b/fs/super.c
> > index aa4e5c11ee32..f3227b22879d 100644
> > --- a/fs/super.c
> > +++ b/fs/super.c
> > @@ -81,16 +81,13 @@ static inline void super_unlock_shared(struct super_block *sb)
> >  	super_unlock(sb, false);
> >  }
> >  
> > +static bool super_load_flags(const struct super_block *sb, unsigned int flags)
> >  {
> >  	/*
> >  	 * Pairs with smp_store_release() in super_wake() and ensures
> > +	 * that we see @flags after we're woken.
> >  	 */
> > +	return smp_load_acquire(&sb->s_flags) & flags;
> 
> I find the name for this helper very confusing.  Yes, eventually it
> is clear that the load maps to a load instruction with acquire semantics
> here, but it's really unusual naming, I think.  Unfortunately I can't
> offer a better one either.

I'll just drop the load from the middle then.

> 
> Otherwise this looks good except for the fact that I really hate
> code using smp_load_acquire / smp_store_release directly because of
> all the mental load it causes.

Hm, it's pretty common in our code in so many places...
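
To make the acquire/release pairing being discussed here concrete, a simplified
sketch of both sides (the real super_wake() in fs/super.c carries additional
sanity checks; only the ordering-relevant parts are shown, and the helper name
follows the patch below):

	/* Writer side: publish the new state, then wake any waiters. */
	static void super_wake(struct super_block *sb, unsigned int flag)
	{
		/*
		 * Pairs with smp_load_acquire() in super_load_flags(): everything
		 * written before this release is visible to a waiter that observes
		 * @flag in sb->s_flags.
		 */
		smp_store_release(&sb->s_flags, sb->s_flags | flag);
		/*
		 * Pairs with the barrier in prepare_to_wait_event() so that the
		 * waiter either sees the flag or wake_up_var() sees the waitqueue.
		 */
		smp_mb();
		wake_up_var(&sb->s_flags);
	}

	/* Reader side: the acquire guarantees the waiter also sees that state. */
	static bool super_load_flags(const struct super_block *sb, unsigned int flags)
	{
		return smp_load_acquire(&sb->s_flags) & flags;
	}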

Patch

diff --git a/fs/super.c b/fs/super.c
index aa4e5c11ee32..f3227b22879d 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -81,16 +81,13 @@  static inline void super_unlock_shared(struct super_block *sb)
 	super_unlock(sb, false);
 }
 
-static inline bool wait_born(struct super_block *sb)
+static bool super_load_flags(const struct super_block *sb, unsigned int flags)
 {
-	unsigned int flags;
-
 	/*
 	 * Pairs with smp_store_release() in super_wake() and ensures
-	 * that we see SB_BORN or SB_DYING after we're woken.
+	 * that we see @flags after we're woken.
 	 */
-	flags = smp_load_acquire(&sb->s_flags);
-	return flags & (SB_BORN | SB_DYING);
+	return smp_load_acquire(&sb->s_flags) & flags;
 }
 
 /**
@@ -111,10 +108,15 @@  static inline bool wait_born(struct super_block *sb)
  */
 static __must_check bool super_lock(struct super_block *sb, bool excl)
 {
-
 	lockdep_assert_not_held(&sb->s_umount);
 
-relock:
+	/* wait until the superblock is ready or dying */
+	wait_var_event(&sb->s_flags, super_load_flags(sb, SB_BORN | SB_DYING));
+
+	/* Don't pointlessly acquire s_umount. */
+	if (super_load_flags(sb, SB_DYING))
+		return false;
+
 	__super_lock(sb, excl);
 
 	/*
@@ -127,20 +129,8 @@  static __must_check bool super_lock(struct super_block *sb, bool excl)
 		return false;
 	}
 
-	/* Has called ->get_tree() successfully. */
-	if (sb->s_flags & SB_BORN)
-		return true;
-
-	super_unlock(sb, excl);
-
-	/* wait until the superblock is ready or dying */
-	wait_var_event(&sb->s_flags, wait_born(sb));
-
-	/*
-	 * Neither SB_BORN nor SB_DYING are ever unset so we never loop.
-	 * Just reacquire @sb->s_umount for the caller.
-	 */
-	goto relock;
+	WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
+	return true;
 }
 
 /* wait and try to acquire read-side of @sb->s_umount */
@@ -523,18 +513,6 @@  void deactivate_super(struct super_block *s)
 
 EXPORT_SYMBOL(deactivate_super);
 
-static inline bool wait_dead(struct super_block *sb)
-{
-	unsigned int flags;
-
-	/*
-	 * Pairs with memory barrier in super_wake() and ensures
-	 * that we see SB_DEAD after we're woken.
-	 */
-	flags = smp_load_acquire(&sb->s_flags);
-	return flags & SB_DEAD;
-}
-
 /**
  * grab_super - acquire an active reference to a superblock
  * @sb: superblock to acquire
@@ -561,7 +539,7 @@  static bool grab_super(struct super_block *sb)
 		}
 		super_unlock_excl(sb);
 	}
-	wait_var_event(&sb->s_flags, wait_dead(sb));
+	wait_var_event(&sb->s_flags, super_load_flags(sb, SB_DEAD));
 	put_super(sb);
 	return false;
 }
@@ -908,8 +886,7 @@  static void __iterate_supers(void (*f)(struct super_block *))
 
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		/* Pairs with memory marrier in super_wake(). */
-		if (smp_load_acquire(&sb->s_flags) & SB_DYING)
+		if (super_load_flags(sb, SB_DYING))
 			continue;
 		sb->s_count++;
 		spin_unlock(&sb_lock);