
[v9,10/17] refcount: introduce __refcount_{add|inc}_not_zero_limited

Message ID 20250111042604.3230628-11-surenb@google.com (mailing list archive)
State New
Series reimplement per-vma lock as a refcount

Commit Message

Suren Baghdasaryan Jan. 11, 2025, 4:25 a.m. UTC
Introduce functions that increase the refcount but fail once the resulting
count would go above a given top limit (the limit is inclusive, i.e. the
count may reach the limit but not exceed it). Setting the limit to INT_MAX
indicates no limit.
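
As an illustration of the inclusive semantics (a standalone sketch with
made-up values, not part of the patch itself):

	refcount_t r = REFCOUNT_INIT(2);
	int old;

	/* 2 + 1 == 3 does not exceed the limit of 3: succeeds, old is set to 2. */
	WARN_ON(!__refcount_add_not_zero_limited(1, &r, &old, 3));

	/* 3 + 1 == 4 would exceed the limit of 3: fails, old is set to 3. */
	WARN_ON(__refcount_add_not_zero_limited(1, &r, &old, 3));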

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 include/linux/refcount.h | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

Comments

Hillf Danton Jan. 11, 2025, 6:31 a.m. UTC | #1
On Fri, 10 Jan 2025 20:25:57 -0800 Suren Baghdasaryan <surenb@google.com>
> -bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
> +bool __refcount_add_not_zero_limited(int i, refcount_t *r, int *oldp,
> +				     int limit)
>  {
>  	int old = refcount_read(r);
>  
>  	do {
>  		if (!old)
>  			break;
> +
> +		if (statically_true(limit == INT_MAX))
> +			continue;
> +
> +		if (i > limit - old) {
> +			if (oldp)
> +				*oldp = old;
> +			return false;
> +		}
>  	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

The acquire version should be used; see atomic_long_try_cmpxchg_acquire()
in kernel/locking/rwsem.c.

Why not just use an atomic_long_t instead of adding this limited version?
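
For reference, a sketch of the ordering change being suggested here would
swap the relaxed cmpxchg for its acquire-ordered counterpart:

	-	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
	+	} while (!atomic_try_cmpxchg_acquire(&r->refs, &old, old + i));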

Patch

diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 35f039ecb272..5072ba99f05e 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -137,13 +137,23 @@  static inline unsigned int refcount_read(const refcount_t *r)
 }
 
 static inline __must_check __signed_wrap
-bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
+bool __refcount_add_not_zero_limited(int i, refcount_t *r, int *oldp,
+				     int limit)
 {
 	int old = refcount_read(r);
 
 	do {
 		if (!old)
 			break;
+
+		if (statically_true(limit == INT_MAX))
+			continue;
+
+		if (i > limit - old) {
+			if (oldp)
+				*oldp = old;
+			return false;
+		}
 	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
 
 	if (oldp)
@@ -155,6 +165,12 @@  bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
 	return old;
 }
 
+static inline __must_check __signed_wrap
+bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
+{
+	return __refcount_add_not_zero_limited(i, r, oldp, INT_MAX);
+}
+
 /**
  * refcount_add_not_zero - add a value to a refcount unless it is 0
  * @i: the value to add to the refcount
@@ -213,6 +229,12 @@  static inline void refcount_add(int i, refcount_t *r)
 	__refcount_add(i, r, NULL);
 }
 
+static inline __must_check bool __refcount_inc_not_zero_limited(refcount_t *r,
+								int *oldp, int limit)
+{
+	return __refcount_add_not_zero_limited(1, r, oldp, limit);
+}
+
 static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
 {
 	return __refcount_add_not_zero(1, r, oldp);
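
A usage sketch of the new helper (hypothetical caller and limit, not code
from this series):

	#include <linux/refcount.h>

	/* Made-up cap, purely for illustration. */
	#define OBJ_REF_LIMIT	(INT_MAX / 2)

	static bool obj_get_capped(refcount_t *ref)
	{
		/*
		 * Succeeds only if the object is still live and the new count
		 * would not exceed OBJ_REF_LIMIT (the limit is inclusive).
		 * Passing INT_MAX instead behaves like __refcount_inc_not_zero().
		 */
		return __refcount_inc_not_zero_limited(ref, NULL, OBJ_REF_LIMIT);
	}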