@@ -3373,7 +3373,7 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
 	struct blk_mq_tags **new_tags;
 
 	if (cur_nr_hw_queues >= new_nr_hw_queues)
-		return 0;
+		goto out;
 
 	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
 				GFP_KERNEL, set->numa_node);
@@ -3385,8 +3385,9 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
 		       sizeof(*set->tags));
 	kfree(set->tags);
 	set->tags = new_tags;
-	set->nr_hw_queues = new_nr_hw_queues;
+out:
+	set->nr_hw_queues = new_nr_hw_queues;
 
 	return 0;
 }
 
@@ -3672,7 +3673,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	    0)
 		goto reregister;
 
-	set->nr_hw_queues = nr_hw_queues;
 fallback:
 	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
Move the 'set->nr_hw_queues = new_nr_hw_queues' assignment into
blk_mq_realloc_tag_set_tags() for the 'cur_nr_hw_queues >= new_nr_hw_queues'
case as well, so that the assignment is encapsulated in one function.
Besides, it removes the redundant assignment in __blk_mq_update_nr_hw_queues()
when 'cur_nr_hw_queues < new_nr_hw_queues'.

Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
---
Changes since v1:
Make the 'set->nr_hw_queues = new_nr_hw_queues' assignment when
'cur_nr_hw_queues >= new_nr_hw_queues' in blk_mq_realloc_tag_set_tags().
---
 block/blk-mq.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
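
For reviewers, the helper after this patch would read roughly as below. This is
a sketch reconstructed from the hunks above; the lines that fall between the two
hunks (the '!new_tags' error check and the memcpy of the old tags array) are
filled in from context rather than quoted verbatim from the tree:

static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
				       int cur_nr_hw_queues,
				       int new_nr_hw_queues)
{
	struct blk_mq_tags **new_tags;

	/* Shrinking or unchanged: nothing to reallocate, just record the count. */
	if (cur_nr_hw_queues >= new_nr_hw_queues)
		goto out;

	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
				GFP_KERNEL, set->numa_node);
	if (!new_tags)		/* assumed error path, not shown in the hunks */
		return -ENOMEM;

	/* Assumed from context: carry the existing pointers over to the new array. */
	if (set->tags)
		memcpy(new_tags, set->tags, cur_nr_hw_queues *
		       sizeof(*set->tags));
	kfree(set->tags);
	set->tags = new_tags;
out:
	/* Single place where the new queue count is recorded, for both paths. */
	set->nr_hw_queues = new_nr_hw_queues;

	return 0;
}

The caller in __blk_mq_update_nr_hw_queues() then relies on this single
assignment and no longer sets set->nr_hw_queues itself before the 'fallback:'
label.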