
[RFC,1/2] percpu_ref: add percpu_ref_tryget_many_live

Message ID 20210415103310.1513841-2-ming.lei@redhat.com (mailing list archive)
State New, archived
Series block: support to freeze bio based queue

Commit Message

Ming Lei April 15, 2021, 10:33 a.m. UTC
Prepare for adding support for freezing bio based request queues.

Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 include/linux/percpu-refcount.h | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)
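
For illustration only (not part of this patch): a caller that wants a batch of
references, for example a bio based queue freeze path, could pair the new
helper with the existing percpu_ref_put_many().  The wrapper function names
below are hypothetical; only the two percpu_ref calls are real API.

	#include <linux/percpu-refcount.h>

	/* Hypothetical caller: take nr references unless the ref is killed. */
	static bool grab_usage_refs(struct percpu_ref *ref, unsigned long nr)
	{
		return percpu_ref_tryget_many_live(ref, nr);
	}

	/* Drop the same batch; percpu_ref_put_many() already exists. */
	static void put_usage_refs(struct percpu_ref *ref, unsigned long nr)
	{
		percpu_ref_put_many(ref, nr);
	}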

Comments

Bart Van Assche April 15, 2021, 7:50 p.m. UTC | #1
On 4/15/21 3:33 AM, Ming Lei wrote:
>  /**
> - * percpu_ref_tryget_live - try to increment a live percpu refcount
> + * percpu_ref_tryget_many_live - try to increment a live percpu refcount
>   * @ref: percpu_ref to try-get
> + * @nr: number of references to get

You may want to change "increment" to "increase" to make it clearer
that this function may increase the percpu refcount by more than one.

Anyway:

Reviewed-by: Bart Van Assche <bvanassche@acm.org>


Patch

diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 16c35a728b4c..9061c7e3113d 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -267,8 +267,9 @@  static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 }
 
 /**
- * percpu_ref_tryget_live - try to increment a live percpu refcount
+ * percpu_ref_tryget_many_live - try to increment a live percpu refcount
  * @ref: percpu_ref to try-get
+ * @nr: number of references to get
  *
  * Increment a percpu refcount unless it has already been killed.  Returns
  * %true on success; %false on failure.
@@ -281,7 +282,8 @@  static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  *
  * This function is safe to call as long as @ref is between init and exit.
  */
-static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
+static inline bool percpu_ref_tryget_many_live(struct percpu_ref *ref,
+					       unsigned long nr)
 {
 	unsigned long __percpu *percpu_count;
 	bool ret = false;
@@ -289,10 +291,10 @@  static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 	rcu_read_lock();
 
 	if (__ref_is_percpu(ref, &percpu_count)) {
-		this_cpu_inc(*percpu_count);
+		this_cpu_add(*percpu_count, nr);
 		ret = true;
 	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
-		ret = atomic_long_inc_not_zero(&ref->data->count);
+		ret = atomic_long_add_unless(&ref->data->count, nr, 0);
 	}
 
 	rcu_read_unlock();
@@ -300,6 +302,26 @@  static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 	return ret;
 }
 
+/**
+ * percpu_ref_tryget_live - try to increment a live percpu refcount
+ * @ref: percpu_ref to try-get
+ *
+ * Increment a percpu refcount unless it has already been killed.  Returns
+ * %true on success; %false on failure.
+ *
+ * Completion of percpu_ref_kill() in itself doesn't guarantee that this
+ * function will fail.  For such guarantee, percpu_ref_kill_and_confirm()
+ * should be used.  After the confirm_kill callback is invoked, it's
+ * guaranteed that no new reference will be given out by
+ * percpu_ref_tryget_live().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
+{
+	return percpu_ref_tryget_many_live(ref, 1);
+}
+
 /**
  * percpu_ref_put_many - decrement a percpu refcount
  * @ref: percpu_ref to put