@@ -106,7 +106,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
blkg->q = q;
INIT_LIST_HEAD(&blkg->q_node);
blkg->blkcg = blkcg;
- atomic_set(&blkg->refcnt, 1);
+ refcount_set(&blkg->refcnt, 1);

/* root blkg uses @q->root_rl, init rl only for !root blkgs */
if (blkcg != &blkcg_root) {
@@ -19,6 +19,7 @@
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
+#include <linux/refcount.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
@@ -122,7 +123,7 @@ struct blkcg_gq {
struct request_list rl;

/* reference count */
- atomic_t refcnt;
+ refcount_t refcnt;

/* is this blkg online? protected by both blkcg and q locks */
bool online;
@@ -354,8 +355,7 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
*/
static inline void blkg_get(struct blkcg_gq *blkg)
{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- atomic_inc(&blkg->refcnt);
+ refcount_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);
@@ -366,8 +366,7 @@ void __blkg_release_rcu(struct rcu_head *rcu);
*/
static inline void blkg_put(struct blkcg_gq *blkg)
{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- if (atomic_dec_and_test(&blkg->refcnt))
+ if (refcount_dec_and_test(&blkg->refcnt))
call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
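
For reviewers unfamiliar with refcount_t: the open-coded WARN_ON_ONCE() checks can go away because the refcount API itself flags misuse. Below is a minimal single-threaded userspace sketch of the semantics this conversion relies on; it is my own illustration, not the kernel implementation (the real refcount_t behind <linux/refcount.h> does these checks atomically). An increment from zero and an extra decrement are reported, and the counter saturates instead of wrapping, so a buggy caller cannot trigger a premature free.

/*
 * Illustrative userspace model only, not the kernel's refcount_t.
 * It mimics the behaviour blkg_get()/blkg_put() now depend on:
 * warn on inc-from-zero and on underflow, saturate instead of wrapping.
 */
#include <stdio.h>
#include <stdbool.h>

#define REF_SATURATED 0x7fffffffu	/* stand-in for the kernel's saturation value */

struct ref { unsigned int count; };

static void ref_set(struct ref *r, unsigned int n)
{
	r->count = n;
}

static void ref_inc(struct ref *r)
{
	if (r->count == 0 || r->count == REF_SATURATED) {
		fprintf(stderr, "warn: increment on zero/saturated refcount\n");
		r->count = REF_SATURATED;	/* saturate, never resurrect or wrap */
		return;
	}
	r->count++;
}

static bool ref_dec_and_test(struct ref *r)
{
	if (r->count == 0 || r->count == REF_SATURATED) {
		fprintf(stderr, "warn: decrement on zero/saturated refcount\n");
		return false;			/* never report "last ref" on misuse */
	}
	return --r->count == 0;
}

int main(void)
{
	struct ref blkg_ref;

	ref_set(&blkg_ref, 1);			/* as in blkg_alloc() */
	ref_inc(&blkg_ref);			/* blkg_get(): count is now 2 */

	if (!ref_dec_and_test(&blkg_ref))	/* blkg_put(): drops back to 1 */
		printf("still referenced\n");
	if (ref_dec_and_test(&blkg_ref))	/* blkg_put(): last reference */
		printf("last ref gone: release would be scheduled via call_rcu()\n");

	return 0;
}

(In the real API the check and the update are a single atomic operation; the point here is only why the WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0) lines are no longer needed.)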