diff mbox series

[10/21] d_alloc_parallel(): set DCACHE_PAR_LOOKUP earlier

Message ID 20250224212051.1756517-10-viro@zeniv.linux.org.uk (mailing list archive)
State New
Headers show
Series [01/21] procfs: kill ->proc_dops | expand

Commit Message

Al Viro Feb. 24, 2025, 9:20 p.m. UTC
Do that before the new dentry is visible anywhere.  It does create
a new possible state for dentries present in ->d_children/->d_sib -
DCACHE_PAR_LOOKUP present, negative, unhashed, not in in-lookup
hash chains, refcount positive.  Those are going to be skipped
by all tree-walkers (both d_walk() callbacks in fs/dcache.c and
explicit loops over children/sibling lists elsewhere) and
dput() is fine with those.

NOTE: dropping the final reference to a "normal" in-lookup dentry
(in in-lookup hash) is a bug - somebody must've forgotten to
call d_lookup_done() on it and bad things will happen.  With those
it's OK; if/when we get around to making __dentry_kill() complain
about such breakage, remember that predicate to check should
*not* be just d_in_lookup(victim) but rather a combination of that
with hlist_bl_unhashed(&victim->d_u.d_in_lookup_hash).  Might
be worth considering later...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
---
 fs/dcache.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

Comments

Christian Brauner Feb. 26, 2025, 8:36 a.m. UTC | #1
On Mon, Feb 24, 2025 at 09:20:40PM +0000, Al Viro wrote:
> Do that before the new dentry is visible anywhere.  It does create
> a new possible state for dentries present in ->d_children/->d_sib -
> DCACHE_PAR_LOOKUP present, negative, unhashed, not in in-lookup
> hash chains, refcount positive.  Those are going to be skipped
> by all tree-walkers (both d_walk() callbacks in fs/dcache.c and
> explicit loops over children/sibling lists elsewhere) and
> dput() is fine with those.
> 
> NOTE: dropping the final reference to a "normal" in-lookup dentry
> (in in-lookup hash) is a bug - somebody must've forgotten to
> call d_lookup_done() on it and bad things will happen.  With those
> it's OK; if/when we get around to making __dentry_kill() complain
> about such breakage, remember that predicate to check should
> *not* be just d_in_lookup(victim) but rather a combination of that
> with hlist_bl_unhashed(&victim->d_u.d_in_lookup_hash).  Might
> be worth considering later...
> 
> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
> ---

Reviewed-by: Christian Brauner <brauner@kernel.org>

>  fs/dcache.c | 10 +++++++---
>  1 file changed, 7 insertions(+), 3 deletions(-)
> 
> diff --git a/fs/dcache.c b/fs/dcache.c
> index 29db27228d97..9ad7cbb5a6b0 100644
> --- a/fs/dcache.c
> +++ b/fs/dcache.c
> @@ -2518,13 +2518,19 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
>  	unsigned int hash = name->hash;
>  	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
>  	struct hlist_bl_node *node;
> -	struct dentry *new = d_alloc(parent, name);
> +	struct dentry *new = __d_alloc(parent->d_sb, name);
>  	struct dentry *dentry;
>  	unsigned seq, r_seq, d_seq;
>  
>  	if (unlikely(!new))
>  		return ERR_PTR(-ENOMEM);

This is minor but it would be clearer if the __d_alloc() call was placed
directly above the error handling.

>  
> +	new->d_flags |= DCACHE_PAR_LOOKUP;
> +	spin_lock(&parent->d_lock);
> +	new->d_parent = dget_dlock(parent);
> +	hlist_add_head(&new->d_sib, &parent->d_children);
> +	spin_unlock(&parent->d_lock);
> +
>  retry:
>  	rcu_read_lock();
>  	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
> @@ -2608,8 +2614,6 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
>  		return dentry;
>  	}
>  	rcu_read_unlock();
> -	/* we can't take ->d_lock here; it's OK, though. */
> -	new->d_flags |= DCACHE_PAR_LOOKUP;
>  	new->d_wait = wq;
>  	hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b);
>  	hlist_bl_unlock(b);
> -- 
> 2.39.5
>
diff mbox series

Patch

diff --git a/fs/dcache.c b/fs/dcache.c
index 29db27228d97..9ad7cbb5a6b0 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2518,13 +2518,19 @@  struct dentry *d_alloc_parallel(struct dentry *parent,
 	unsigned int hash = name->hash;
 	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
 	struct hlist_bl_node *node;
-	struct dentry *new = d_alloc(parent, name);
+	struct dentry *new = __d_alloc(parent->d_sb, name);
 	struct dentry *dentry;
 	unsigned seq, r_seq, d_seq;
 
 	if (unlikely(!new))
 		return ERR_PTR(-ENOMEM);
 
+	new->d_flags |= DCACHE_PAR_LOOKUP;
+	spin_lock(&parent->d_lock);
+	new->d_parent = dget_dlock(parent);
+	hlist_add_head(&new->d_sib, &parent->d_children);
+	spin_unlock(&parent->d_lock);
+
 retry:
 	rcu_read_lock();
 	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
@@ -2608,8 +2614,6 @@  struct dentry *d_alloc_parallel(struct dentry *parent,
 		return dentry;
 	}
 	rcu_read_unlock();
-	/* we can't take ->d_lock here; it's OK, though. */
-	new->d_flags |= DCACHE_PAR_LOOKUP;
 	new->d_wait = wq;
 	hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b);
 	hlist_bl_unlock(b);