[1/1] virtio-blk: simplify probe function

Message ID: 20221002154417.34583-1-mgurtovoy@nvidia.com (mailing list archive)
State: New, archived
Series: [1/1] virtio-blk: simplify probe function

Commit Message

Max Gurtovoy Oct. 2, 2022, 3:44 p.m. UTC
Divide the extremely long probe function into small, meaningful functions.
This makes the code more readable and easier to maintain.

Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
---
 drivers/block/virtio_blk.c | 227 +++++++++++++++++++++----------------
 1 file changed, 131 insertions(+), 96 deletions(-)
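
As a quick orientation for readers of the archive, the sketch below shows
the refactoring pattern the patch applies, written as standalone C rather
than the actual driver code. All names here (mydev_probe,
mydev_tagset_alloc, and so on) are hypothetical stand-ins: one long probe
routine is divided into small helpers that each own a single setup step and
return 0 or a negative errno, while the caller keeps the unwind labels and
releases only what has already been set up.

/*
 * Minimal sketch of the "split a long probe function" pattern.
 * Not kernel code; the mydev_* names are invented for illustration.
 */
#include <errno.h>
#include <stdio.h>

struct mydev {
	int tagset_ready;
	int block_size;
};

/* Stand-in for a setup step such as tag set + disk allocation. */
static int mydev_tagset_alloc(struct mydev *dev)
{
	dev->tagset_ready = 1;
	return 0;
}

/* Stand-in for the matching teardown of that step. */
static void mydev_tagset_put(struct mydev *dev)
{
	dev->tagset_ready = 0;
}

/* Stand-in for a validation step: report an error, let the caller unwind. */
static int mydev_limits_set(struct mydev *dev)
{
	if (dev->block_size <= 0)
		return -EINVAL;
	return 0;
}

/* The probe function shrinks to a sequence of named steps. */
static int mydev_probe(struct mydev *dev)
{
	int err;

	err = mydev_tagset_alloc(dev);
	if (err)
		return err;		/* nothing to unwind yet */

	err = mydev_limits_set(dev);
	if (err)
		goto out_tagset_put;	/* unwind only completed steps */

	return 0;

out_tagset_put:
	mydev_tagset_put(dev);
	return err;
}

int main(void)
{
	struct mydev dev = { .block_size = 512 };

	printf("probe: %d\n", mydev_probe(&dev));
	return 0;
}

In the patch itself the same shape appears as virtblk_tagset_alloc(),
virtblk_q_limits_set(), and the out_tagset_put/out_free_vq unwind labels in
the new virtblk_probe().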

Comments

Michael S. Tsirkin Oct. 2, 2022, 3:50 p.m. UTC | #1
On Sun, Oct 02, 2022 at 06:44:17PM +0300, Max Gurtovoy wrote:
> Divide the extremely long probe function into small, meaningful functions.
> This makes the code more readable and easier to maintain.
> 
> Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>

This is subjective ... pls CC some reviewers. If Stefan or Paolo
acks this, I will merge.


Patch

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 30255fcaf181..bdd44069bf6f 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -882,28 +882,13 @@ static const struct blk_mq_ops virtio_mq_ops = {
 static unsigned int virtblk_queue_depth;
 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
 
-static int virtblk_probe(struct virtio_device *vdev)
+static int virtblk_q_limits_set(struct virtio_device *vdev,
+		struct request_queue *q)
 {
-	struct virtio_blk *vblk;
-	struct request_queue *q;
-	int err, index;
-
-	u32 v, blk_size, max_size, sg_elems, opt_io_size;
-	u16 min_io_size;
 	u8 physical_block_exp, alignment_offset;
-	unsigned int queue_depth;
-
-	if (!vdev->config->get) {
-		dev_err(&vdev->dev, "%s failure: config access disabled\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
-			     GFP_KERNEL);
-	if (err < 0)
-		goto out;
-	index = err;
+	u16 min_io_size;
+	u32 v, blk_size, max_size, sg_elems, opt_io_size;
+	int err;
 
 	/* We need to know how many segments before we allocate. */
 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
@@ -917,73 +902,6 @@ static int virtblk_probe(struct virtio_device *vdev)
 	/* Prevent integer overflows and honor max vq size */
 	sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);
 
-	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
-	if (!vblk) {
-		err = -ENOMEM;
-		goto out_free_index;
-	}
-
-	mutex_init(&vblk->vdev_mutex);
-
-	vblk->vdev = vdev;
-
-	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
-
-	err = init_vq(vblk);
-	if (err)
-		goto out_free_vblk;
-
-	/* Default queue sizing is to fill the ring. */
-	if (!virtblk_queue_depth) {
-		queue_depth = vblk->vqs[0].vq->num_free;
-		/* ... but without indirect descs, we use 2 descs per req */
-		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
-			queue_depth /= 2;
-	} else {
-		queue_depth = virtblk_queue_depth;
-	}
-
-	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
-	vblk->tag_set.ops = &virtio_mq_ops;
-	vblk->tag_set.queue_depth = queue_depth;
-	vblk->tag_set.numa_node = NUMA_NO_NODE;
-	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	vblk->tag_set.cmd_size =
-		sizeof(struct virtblk_req) +
-		sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
-	vblk->tag_set.driver_data = vblk;
-	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
-	vblk->tag_set.nr_maps = 1;
-	if (vblk->io_queues[HCTX_TYPE_POLL])
-		vblk->tag_set.nr_maps = 3;
-
-	err = blk_mq_alloc_tag_set(&vblk->tag_set);
-	if (err)
-		goto out_free_vq;
-
-	vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
-	if (IS_ERR(vblk->disk)) {
-		err = PTR_ERR(vblk->disk);
-		goto out_free_tags;
-	}
-	q = vblk->disk->queue;
-
-	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
-
-	vblk->disk->major = major;
-	vblk->disk->first_minor = index_to_minor(index);
-	vblk->disk->minors = 1 << PART_BITS;
-	vblk->disk->private_data = vblk;
-	vblk->disk->fops = &virtblk_fops;
-	vblk->index = index;
-
-	/* configure queue flush support */
-	virtblk_update_cache_mode(vdev);
-
-	/* If disk is read-only in the host, the guest should obey */
-	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
-		set_disk_ro(vblk->disk, 1);
-
 	/* We can handle whatever the host told us to handle. */
 	blk_queue_max_segments(q, sg_elems);
 
@@ -1011,12 +929,13 @@ static int virtblk_probe(struct virtio_device *vdev)
 			dev_err(&vdev->dev,
 				"virtio_blk: invalid block size: 0x%x\n",
 				blk_size);
-			goto out_cleanup_disk;
+			return err;
 		}
 
 		blk_queue_logical_block_size(q, blk_size);
-	} else
+	} else {
 		blk_size = queue_logical_block_size(q);
+	}
 
 	/* Use topology information if available */
 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
@@ -1075,19 +994,136 @@ static int virtblk_probe(struct virtio_device *vdev)
 		blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
 	}
 
+	return 0;
+}
+
+static void virtblk_tagset_put(struct virtio_blk *vblk)
+{
+	put_disk(vblk->disk);
+	blk_mq_free_tag_set(&vblk->tag_set);
+}
+
+static void virtblk_tagset_free(struct virtio_blk *vblk)
+{
+	del_gendisk(vblk->disk);
+	blk_mq_free_tag_set(&vblk->tag_set);
+}
+
+static int virtblk_tagset_alloc(struct virtio_blk *vblk,
+		unsigned int queue_depth)
+{
+	int err;
+
+	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
+	vblk->tag_set.ops = &virtio_mq_ops;
+	vblk->tag_set.queue_depth = queue_depth;
+	vblk->tag_set.numa_node = NUMA_NO_NODE;
+	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	vblk->tag_set.cmd_size =
+		sizeof(struct virtblk_req) +
+		sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
+	vblk->tag_set.driver_data = vblk;
+	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
+	vblk->tag_set.nr_maps = 1;
+	if (vblk->io_queues[HCTX_TYPE_POLL])
+		vblk->tag_set.nr_maps = 3;
+
+	err = blk_mq_alloc_tag_set(&vblk->tag_set);
+	if (err)
+		return err;
+
+	vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
+	if (IS_ERR(vblk->disk)) {
+		err = PTR_ERR(vblk->disk);
+		goto out_free_tags;
+	}
+
+	return 0;
+
+out_free_tags:
+	blk_mq_free_tag_set(&vblk->tag_set);
+	return err;
+}
+
+static int virtblk_probe(struct virtio_device *vdev)
+{
+	struct virtio_blk *vblk;
+	int err, index;
+	unsigned int queue_depth;
+
+	if (!vdev->config->get) {
+		dev_err(&vdev->dev, "%s failure: config access disabled\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
+			     GFP_KERNEL);
+	if (err < 0)
+		goto out;
+	index = err;
+
+	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
+	if (!vblk) {
+		err = -ENOMEM;
+		goto out_free_index;
+	}
+
+	mutex_init(&vblk->vdev_mutex);
+
+	vblk->vdev = vdev;
+
+	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
+
+	err = init_vq(vblk);
+	if (err)
+		goto out_free_vblk;
+
+	/* Default queue sizing is to fill the ring. */
+	if (!virtblk_queue_depth) {
+		queue_depth = vblk->vqs[0].vq->num_free;
+		/* ... but without indirect descs, we use 2 descs per req */
+		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
+			queue_depth /= 2;
+	} else {
+		queue_depth = virtblk_queue_depth;
+	}
+
+	err = virtblk_tagset_alloc(vblk, queue_depth);
+	if (err)
+		goto out_free_vq;
+
+	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
+
+	vblk->disk->major = major;
+	vblk->disk->first_minor = index_to_minor(index);
+	vblk->disk->minors = 1 << PART_BITS;
+	vblk->disk->private_data = vblk;
+	vblk->disk->fops = &virtblk_fops;
+	vblk->index = index;
+
+	/* configure queue flush support */
+	virtblk_update_cache_mode(vdev);
+
+	/* If disk is read-only in the host, the guest should obey */
+	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
+		set_disk_ro(vblk->disk, 1);
+
+	err = virtblk_q_limits_set(vdev, vblk->disk->queue);
+	if (err)
+		goto out_tagset_put;
+
 	virtblk_update_capacity(vblk, false);
 	virtio_device_ready(vdev);
 
 	err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
 	if (err)
-		goto out_cleanup_disk;
+		goto out_tagset_put;
 
 	return 0;
 
-out_cleanup_disk:
-	put_disk(vblk->disk);
-out_free_tags:
-	blk_mq_free_tag_set(&vblk->tag_set);
+out_tagset_put:
+	virtblk_tagset_put(vblk);
 out_free_vq:
 	vdev->config->del_vqs(vdev);
 	kfree(vblk->vqs);
@@ -1106,8 +1142,7 @@ static void virtblk_remove(struct virtio_device *vdev)
 	/* Make sure no work handler is accessing the device. */
 	flush_work(&vblk->config_work);
 
-	del_gendisk(vblk->disk);
-	blk_mq_free_tag_set(&vblk->tag_set);
+	virtblk_tagset_free(vblk);
 
 	mutex_lock(&vblk->vdev_mutex);