@@ -367,7 +367,6 @@ static void blkg_destroy(struct blkcg_gq *blkg)
int i;
lockdep_assert_held(&blkg->q->queue_lock);
- lockdep_assert_held(&blkcg->blkg_array.xa_lock);
/* Something wrong if we are trying to remove same group twice */
WARN_ON_ONCE(list_empty(&blkg->q_node));
@@ -386,7 +385,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
blkg->online = false;
- __xa_erase(&blkcg->blkg_array, blkg->q->id);
+ xa_erase(&blkcg->blkg_array, blkg->q->id);
list_del_init(&blkg->q_node);
/*
@@ -416,11 +415,7 @@ static void blkg_destroy_all(struct request_queue *q)
spin_lock_irq(&q->queue_lock);
list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
- struct blkcg *blkcg = blkg->blkcg;
-
- xa_lock(&blkcg->blkg_array);
blkg_destroy(blkg);
- xa_unlock(&blkcg->blkg_array);
}
q->root_blkg = NULL;
@@ -1080,9 +1075,7 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
struct request_queue *q = blkg->q;
spin_lock_irq(&q->queue_lock);
- xa_lock(&blkcg->blkg_array);
blkg_destroy(blkg);
- xa_unlock(&blkcg->blkg_array);
spin_unlock_irq(&q->queue_lock);
}
}
We can now take and release the blkg_array lock within blkg_destroy()
instead of forcing the caller to hold it across the call.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
---
 block/blk-cgroup.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)