@@ -847,13 +847,6 @@ static inline bool fast_dput(struct dentry *dentry)
 		spin_unlock(&dentry->d_lock);
 		return true;
 	}
-
-	/*
-	 * Re-get the reference we optimistically dropped. We hold the
-	 * lock, and we just tested that it was zero, so we can just
-	 * set it to 1.
-	 */
-	dentry->d_lockref.count = 1;
 	return false;
 }
 
@@ -896,6 +889,7 @@ void dput(struct dentry *dentry)
 		}
 
 		/* Slow case: now with the dentry lock held */
+		dentry->d_lockref.count = 1;
 		rcu_read_unlock();
 
 		if (likely(retain_dentry(dentry))) {
@@ -930,6 +924,7 @@ void dput_to_list(struct dentry *dentry, struct list_head *list)
 		return;
 	}
 	rcu_read_unlock();
+	dentry->d_lockref.count = 1;
 	if (!retain_dentry(dentry))
 		__dput_to_list(dentry, list);
 	spin_unlock(&dentry->d_lock);
Currently the "need caller to do more work" path in fast_dput() has the
refcount decremented; then, with ->d_lock held and the refcount verified
to have reached 0, fast_dput() forcibly resets the refcount to 1.

Move that resetting of the refcount to 1 into the callers; later in the
series it will be massaged out of existence.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
---
 fs/dcache.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)
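
Not part of the patch, just an illustration of the new calling
convention: a minimal, self-contained userspace sketch in which a
pthread mutex stands in for ->d_lock and a plain int for
->d_lockref.count. The names mirror the kernel ones but the code is
hypothetical and greatly simplified -- the real fast_dput() first tries
a lockless lockref decrement and inspects d_flags before ever taking
the lock, all of which is omitted here.

	/* Illustrative model only; not kernel code. */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct dentry {
		pthread_mutex_t d_lock;
		int count;		/* stands in for ->d_lockref.count */
	};

	/*
	 * Modeled on the patched fast_dput(): on the "need caller to
	 * do more work" path it now returns false with d_lock held and
	 * count == 0, instead of resetting count to 1 itself.
	 */
	static bool fast_dput(struct dentry *dentry)
	{
		pthread_mutex_lock(&dentry->d_lock);
		if (dentry->count > 1) {
			dentry->count--;
			pthread_mutex_unlock(&dentry->d_lock);
			return true;
		}
		dentry->count = 0;	/* dropped the last reference */
		return false;
	}

	/* Modeled on the patched dput(): the reset lives in the caller now. */
	static void dput(struct dentry *dentry)
	{
		if (fast_dput(dentry))
			return;
		/*
		 * Slow case: d_lock is held and count is 0. Re-take the
		 * reference we optimistically dropped, exactly as the
		 * patched callers do; retain/eviction logic would follow.
		 */
		dentry->count = 1;
		pthread_mutex_unlock(&dentry->d_lock);
	}

	int main(void)
	{
		struct dentry d = { PTHREAD_MUTEX_INITIALIZER, 2 };

		dput(&d);	/* fast path: 2 -> 1 */
		dput(&d);	/* slow path: 1 -> 0, caller resets to 1 */
		printf("count = %d\n", d.count);	/* prints: count = 1 */
		return 0;
	}

The point the sketch makes is the contract change only: fast_dput()
still communicates "caller must finish the job" by returning false with
the lock held, but the responsibility for restoring the reference is
now visibly in each caller, which is what lets later patches in the
series eliminate it.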