
[net-next,v16,06/26] kref/refcount: implement kref_put_sock()

Message ID 20241219-b4-ovpn-v16-6-3e3001153683@openvpn.net (mailing list archive)
State New
Series: Introducing OpenVPN Data Channel Offload

Commit Message

Antonio Quartulli Dec. 19, 2024, 1:42 a.m. UTC
Similarly to kref_put_lock(), decrease the refcount
and call bh_lock_sock(sk) if it reaches 0.

This kref_put() variant comes in handy when a
socket context needs to be cleaned up atomically
along with the refcount dropping to 0.

Cc: Will Deacon <will@kernel.org> (maintainer:ATOMIC INFRASTRUCTURE)
Cc: Peter Zijlstra <peterz@infradead.org> (maintainer:ATOMIC INFRASTRUCTURE)
Cc: Boqun Feng <boqun.feng@gmail.com> (reviewer:ATOMIC INFRASTRUCTURE)
Cc: Mark Rutland <mark.rutland@arm.com> (reviewer:ATOMIC INFRASTRUCTURE)
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Antonio Quartulli <antonio@openvpn.net>
---
 include/linux/kref.h     | 11 +++++++++++
 include/linux/refcount.h |  3 +++
 lib/refcount.c           | 32 ++++++++++++++++++++++++++++++++
 3 files changed, 46 insertions(+)
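
As an illustration of the intended use (not part of the patch itself), a
caller that ties the lifetime of a per-socket context to a kref could release
it roughly as sketched below. struct my_sock_ctx, my_ctx_release() and
my_ctx_put() are made-up names, and a real implementation would likely defer
the final free via RCU:

#include <linux/kref.h>
#include <linux/slab.h>
#include <net/sock.h>

struct my_sock_ctx {
	struct sock *sk;
	struct kref refcount;
};

static void my_ctx_release(struct kref *kref)
{
	struct my_sock_ctx *ctx = container_of(kref, struct my_sock_ctx,
					       refcount);
	struct sock *sk = ctx->sk;

	/* kref_put_sock() acquired bh_lock_sock(sk) before the refcount
	 * dropped to 0, so detaching the context from the socket happens
	 * atomically with respect to softirq users of the socket. The
	 * release callback is responsible for dropping the lock.
	 */
	rcu_assign_sk_user_data(sk, NULL);
	bh_unlock_sock(sk);
	kfree(ctx);
}

static void my_ctx_put(struct my_sock_ctx *ctx)
{
	/* On the final put, my_ctx_release() runs with the socket locked. */
	kref_put_sock(&ctx->refcount, my_ctx_release, ctx->sk);
}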

Comments

Will Deacon Dec. 19, 2024, 5:20 p.m. UTC | #1
On Thu, Dec 19, 2024 at 02:42:00AM +0100, Antonio Quartulli wrote:
> Similarly to kref_put_lock(), decrease the refcount
> and call bh_lock_sock(sk) if it reaches 0.
> 
> This kref_put() variant comes in handy when a
> socket context needs to be cleaned up atomically
> along with the refcount dropping to 0.
> 
> Cc: Will Deacon <will@kernel.org> (maintainer:ATOMIC INFRASTRUCTURE)
> Cc: Peter Zijlstra <peterz@infradead.org> (maintainer:ATOMIC INFRASTRUCTURE)
> Cc: Boqun Feng <boqun.feng@gmail.com> (reviewer:ATOMIC INFRASTRUCTURE)
> Cc: Mark Rutland <mark.rutland@arm.com> (reviewer:ATOMIC INFRASTRUCTURE)
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Signed-off-by: Antonio Quartulli <antonio@openvpn.net>
> ---
>  include/linux/kref.h     | 11 +++++++++++
>  include/linux/refcount.h |  3 +++
>  lib/refcount.c           | 32 ++++++++++++++++++++++++++++++++

[...]

> diff --git a/lib/refcount.c b/lib/refcount.c
> index a207a8f22b3ca35890671e51c480266d89e4d8d6..76a728581aa49a41ef13f5141f3f2e9816d72e75 100644
> --- a/lib/refcount.c
> +++ b/lib/refcount.c
> @@ -7,6 +7,7 @@
>  #include <linux/refcount.h>
>  #include <linux/spinlock.h>
>  #include <linux/bug.h>
> +#include <net/sock.h>
>  
>  #define REFCOUNT_WARN(str)	WARN_ONCE(1, "refcount_t: " str ".\n")
>  
> @@ -156,6 +157,37 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
>  }
>  EXPORT_SYMBOL(refcount_dec_and_lock);
>  
> +/**
> + * refcount_dec_and_lock_sock - return holding locked sock if able to decrement
> + *				refcount to 0
> + * @r: the refcount
> + * @sock: the sock to be locked
> + *
> + * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
> + * decrement when saturated at REFCOUNT_SATURATED.
> + *
> + * Provides release memory ordering, such that prior loads and stores are done
> + * before, and provides a control dependency such that free() must come after.
> + * See the comment on top.
> + *
> + * Return: true and hold sock if able to decrement refcount to 0, false
> + *	   otherwise
> + */
> +bool refcount_dec_and_lock_sock(refcount_t *r, struct sock *sock)
> +{
> +	if (refcount_dec_not_one(r))
> +		return false;
> +
> +	bh_lock_sock(sock);
> +	if (!refcount_dec_and_test(r)) {
> +		bh_unlock_sock(sock);
> +		return false;
> +	}
> +
> +	return true;
> +}
> +EXPORT_SYMBOL(refcount_dec_and_lock_sock);

It feels a little out-of-place to me having socket-specific functions in
lib/refcount.c. I'd suggest sticking this somewhere else _or_ maybe we
could generate this pattern of code:

#define REFCOUNT_DEC_AND_LOCKNAME(lockname, locktype, lock, unlock)	\
static __always_inline							\
bool refcount_dec_and_lock_##lockname(refcount_t *r, locktype *l)	\
{									\
	...

inside a generator macro in refcount.h, like we do for seqlocks in
linux/seqlock.h. The downside of that is the cost of inlining.

Will
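
For reference, a minimal sketch of the generator-macro idea suggested above,
loosely modeled on the lock-type generators in linux/seqlock.h; the macro
name, parameter list and instantiation are illustrative assumptions, not
existing kernel code:

#define REFCOUNT_DEC_AND_LOCKNAME(lockname, locktype, lock, unlock)	\
static __always_inline							\
bool refcount_dec_and_lock_##lockname(refcount_t *r, locktype *l)	\
{									\
	if (refcount_dec_not_one(r))					\
		return false;						\
									\
	lock(l);							\
	if (!refcount_dec_and_test(r)) {				\
		unlock(l);						\
		return false;						\
	}								\
	return true;							\
}

/* Hypothetical instantiation for sockets, placed where
 * bh_lock_sock()/bh_unlock_sock() are visible:
 */
REFCOUNT_DEC_AND_LOCKNAME(sock, struct sock, bh_lock_sock, bh_unlock_sock)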
Antonio Quartulli Dec. 31, 2024, 7:31 a.m. UTC | #2
Hi Will,

Thanks a lot for chiming in and sorry for the delay.
See below.

On 19/12/24 18:20, Will Deacon wrote:
[...]
>> +/**
>> + * refcount_dec_and_lock_sock - return holding locked sock if able to decrement
>> + *				refcount to 0
>> + * @r: the refcount
>> + * @sock: the sock to be locked
>> + *
>> + * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
>> + * decrement when saturated at REFCOUNT_SATURATED.
>> + *
>> + * Provides release memory ordering, such that prior loads and stores are done
>> + * before, and provides a control dependency such that free() must come after.
>> + * See the comment on top.
>> + *
>> + * Return: true and hold sock if able to decrement refcount to 0, false
>> + *	   otherwise
>> + */
>> +bool refcount_dec_and_lock_sock(refcount_t *r, struct sock *sock)
>> +{
>> +	if (refcount_dec_not_one(r))
>> +		return false;
>> +
>> +	bh_lock_sock(sock);
>> +	if (!refcount_dec_and_test(r)) {
>> +		bh_unlock_sock(sock);
>> +		return false;
>> +	}
>> +
>> +	return true;
>> +}
>> +EXPORT_SYMBOL(refcount_dec_and_lock_sock);
> 
> It feels a little out-of-place to me having socket-specific functions in
> lib/refcount.c. I'd suggest sticking this somewhere else _or_ maybe we
> could generate this pattern of code:
> 
> #define REFCOUNT_DEC_AND_LOCKNAME(lockname, locktype, lock, unlock)	\
> static __always_inline							\
> bool refcount_dec_and_lock_##lockname(refcount_t *r, locktype *l)	\
> {									\
> 	...
> 
> inside a generator macro in refcount.h, like we do for seqlocks in
> linux/seqlock.h. The downside of that is the cost of inlining.

Does your suggestion imply that I should convert the already existing 
functions to this macro?
In that case I believe the change would be too invasive, and other devs 
may not like the inlining, as you pointed out.

Secondly, I thought about moving this function to net/core/sock.c, but 
if you look at it, its logic is mostly about refcounting, with a little 
touch of sock. Hence, sock.c (or any other net file) does not seem 
appropriate either.

I guess for the time being it is more convenient to keep this function, 
and its kref counterpart, inside 'ovpn'.
They can be moved to a better spot once they gain another user.

Best Regards,
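
For illustration only, keeping the helpers inside the ovpn module as
suggested above could look roughly like the driver-local static functions
below; the ovpn_-prefixed names and their placement are assumptions, not what
the series ultimately does:

/* e.g. in a driver-local header (path purely illustrative) */
static inline bool ovpn_refcount_dec_and_lock_sock(refcount_t *r,
						   struct sock *sock)
{
	if (refcount_dec_not_one(r))
		return false;

	bh_lock_sock(sock);
	if (!refcount_dec_and_test(r)) {
		bh_unlock_sock(sock);
		return false;
	}
	return true;
}

static inline int ovpn_kref_put_sock(struct kref *kref,
				     void (*release)(struct kref *kref),
				     struct sock *sock)
{
	if (ovpn_refcount_dec_and_lock_sock(&kref->refcount, sock)) {
		release(kref);
		return 1;
	}
	return 0;
}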

Patch

diff --git a/include/linux/kref.h b/include/linux/kref.h
index d32e21a2538c292452db99b915b1bb6c3ab15e53..68b11b0c9c0fdd17cf706e0eb15f74cfe2efa178 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -90,6 +90,17 @@  static inline int kref_put_lock(struct kref *kref,
 	return 0;
 }
 
+static inline int kref_put_sock(struct kref *kref,
+				void (*release)(struct kref *kref),
+				struct sock *sock)
+{
+	if (refcount_dec_and_lock_sock(&kref->refcount, sock)) {
+		release(kref);
+		return 1;
+	}
+	return 0;
+}
+
 /**
  * kref_get_unless_zero - Increment refcount for object unless it is zero.
  * @kref: object.
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 35f039ecb2725618ca098e3515c6e19e2aece3ee..22698db1f24fdaf884a508b8a0cd44eb62194a9f 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -100,6 +100,7 @@ 
 #include <linux/spinlock_types.h>
 
 struct mutex;
+struct sock;
 
 #define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
 #define REFCOUNT_MAX		INT_MAX
@@ -358,4 +359,6 @@  extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
 						       spinlock_t *lock,
 						       unsigned long *flags) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_lock_sock(refcount_t *r,
+						    struct sock *sock);
 #endif /* _LINUX_REFCOUNT_H */
diff --git a/lib/refcount.c b/lib/refcount.c
index a207a8f22b3ca35890671e51c480266d89e4d8d6..76a728581aa49a41ef13f5141f3f2e9816d72e75 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -7,6 +7,7 @@ 
 #include <linux/refcount.h>
 #include <linux/spinlock.h>
 #include <linux/bug.h>
+#include <net/sock.h>
 
 #define REFCOUNT_WARN(str)	WARN_ONCE(1, "refcount_t: " str ".\n")
 
@@ -156,6 +157,37 @@  bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 }
 EXPORT_SYMBOL(refcount_dec_and_lock);
 
+/**
+ * refcount_dec_and_lock_sock - return holding locked sock if able to decrement
+ *				refcount to 0
+ * @r: the refcount
+ * @sock: the sock to be locked
+ *
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Return: true and hold sock if able to decrement refcount to 0, false
+ *	   otherwise
+ */
+bool refcount_dec_and_lock_sock(refcount_t *r, struct sock *sock)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	bh_lock_sock(sock);
+	if (!refcount_dec_and_test(r)) {
+		bh_unlock_sock(sock);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_sock);
+
 /**
  * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
  *                                 interrupts if able to decrement refcount to 0