
[v3,2/2] virtio-blk: support mq_ops->queue_rqs()

Message ID 20220324140450.33148-3-suwan.kim027@gmail.com (mailing list archive)
State New, archived
Series virtio-blk: support polling I/O and mq_ops->queue_rqs()

Commit Message

Suwan Kim March 24, 2022, 2:04 p.m. UTC
This patch supports the mq_ops->queue_rqs() hook, which enables batch
submission of requests to the virtio-blk driver. It also helps polling
I/O, because polling uses the block layer's batched completion; batch
submission in queue_rqs() can therefore boost polling performance.
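
For reference, the poll path added in patch 1/2 reaps completions in
batches, roughly like this (a condensed sketch, not the exact code of
that patch; virtblk_complete_batch() is the batch-completion callback
it introduces):

  static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
  {
  	struct virtio_blk_vq *vq = hctx->driver_data;
  	struct virtblk_req *vbr;
  	unsigned long flags;
  	unsigned int len;
  	int found = 0;

  	spin_lock_irqsave(&vq->lock, flags);
  	while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
  		struct request *req = blk_mq_rq_from_pdu(vbr);

  		found++;
  		/* Batch the completion; complete immediately if the
  		 * request cannot be added to the batch. */
  		if (!blk_mq_add_to_batch(req, iob, vbr->status,
  					 virtblk_complete_batch))
  			blk_mq_complete_request(req);
  	}
  	spin_unlock_irqrestore(&vq->lock, flags);

  	return found;
  }

The larger the submitted batch, the more completions each poll can
reap at once.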

In queue_rqs(), we iterate over plug->mq_list, collecting requests
that belong to the same HW queue and adding them to the virtqueue
until we encounter a request from another HW queue or reach the end
of the list. Then virtio-blk kicks the virtqueue to submit the
requests.

If an error occurs, the failed request is inserted into requeue_list
and passed back to the ordinary block layer path.
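
In code, this maps onto virtio_queue_rqs() in the patch below; an
annotated copy:

  static void virtio_queue_rqs(struct request **rqlist)
  {
  	struct request *req, *next, *prev = NULL;
  	struct request *requeue_list = NULL;

  	rq_list_for_each_safe(rqlist, req, next) {
  		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
  		unsigned long flags;
  		bool kick;

  		/* Prepare the request and add it to its virtqueue; on
  		 * failure move it to requeue_list and back up to the
  		 * previous request. */
  		if (!virtblk_prep_rq_batch(vq, req)) {
  			rq_list_move(rqlist, &requeue_list, req, prev);
  			req = prev;
  			if (!req)
  				continue;
  		}

  		/* Kick once per run of requests on the same HW queue */
  		if (!next || req->mq_hctx != next->mq_hctx) {
  			spin_lock_irqsave(&vq->lock, flags);
  			kick = virtqueue_kick_prepare(vq->vq);
  			spin_unlock_irqrestore(&vq->lock, flags);
  			if (kick)
  				virtqueue_notify(vq->vq);

  			req->rq_next = NULL;
  			*rqlist = next;
  			prev = NULL;
  		} else
  			prev = req;
  	}

  	/* Hand failed requests back to the ordinary block layer path */
  	*rqlist = requeue_list;
  }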

For verification, I ran fio tests
(io_uring, randread, direct=1, bs=4K, iodepth=64, numjobs=N).
The VM was configured with 4 vCPUs and 2 virtio-blk queues, and each
test was run 5 times. The results show about a 1-4% improvement.

                                 |   numjobs=2   |    numjobs=4
      -----------------------------------------------------------
        fio without queue_rqs()  |   282K IOPS   |    245K IOPS
      -----------------------------------------------------------
        fio with queue_rqs()     |   294K IOPS   |    249K IOPS

For polling I/O performance, I ran a similar fio test
(io_uring, hipri, randread, direct=1, bs=512, iodepth=64, numjobs=4)
with 4 vCPUs and 2 poll queues for the VM. It shows up to a 7%
improvement for polling I/O.

                                      |   IOPS   |  avg latency
      -----------------------------------------------------------
        fio poll without queue_rqs()  |   413K   |   619.72 usec
      -----------------------------------------------------------
        fio poll with queue_rqs()     |   445K   |   581.2  usec

Signed-off-by: Suwan Kim <suwan.kim027@gmail.com>
---
 drivers/block/virtio_blk.c | 93 ++++++++++++++++++++++++++++++++++----
 1 file changed, 84 insertions(+), 9 deletions(-)

Comments

Stefan Hajnoczi March 28, 2022, 1:16 p.m. UTC | #1
On Thu, Mar 24, 2022 at 11:04:50PM +0900, Suwan Kim wrote:
> @@ -367,6 +381,66 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
>  	return BLK_STS_OK;
>  }
>  
> +static bool virtblk_prep_rq_batch(struct virtio_blk_vq *vq, struct request *req)
> +{
> +	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
> +	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
> +	unsigned long flags;
> +	int num, err;
> +
> +	req->mq_hctx->tags->rqs[req->tag] = req;
> +
> +	if (virtblk_prep_rq(req->mq_hctx, vblk, req, vbr, &num) != BLK_STS_OK)
> +		return false;
> +
> +	spin_lock_irqsave(&vq->lock, flags);
> +	err = virtblk_add_req(vq->vq, vbr, vbr->sg_table.sgl, num);
> +	if (err) {
> +		spin_unlock_irqrestore(&vq->lock, flags);
> +		virtblk_unmap_data(req, vbr);
> +		virtblk_cleanup_cmd(req);
> +		return false;
> +	}
> +	spin_unlock_irqrestore(&vq->lock, flags);

Simplification:

  spin_lock_irqsave(&vq->lock, flags);
  err = virtblk_add_req(vq->vq, vbr, vbr->sg_table.sgl, num);
  spin_unlock_irqrestore(&vq->lock, flags);
  if (err) {
      virtblk_unmap_data(req, vbr);
      virtblk_cleanup_cmd(req);
      return false;
  }

> +
> +	return true;
> +}
> +
> +static void virtio_queue_rqs(struct request **rqlist)
> +{
> +	struct request *req, *next, *prev = NULL;
> +	struct request *requeue_list = NULL;
> +
> +	rq_list_for_each_safe(rqlist, req, next) {
> +		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
> +		unsigned long flags;
> +		bool kick;
> +
> +		if (!virtblk_prep_rq_batch(vq, req)) {
> +			rq_list_move(rqlist, &requeue_list, req, prev);
> +			req = prev;
> +
> +			if (!req)
> +				continue;
> +		}
> +
> +		if (!next || req->mq_hctx != next->mq_hctx) {
> +			spin_lock_irqsave(&vq->lock, flags);

Did you try calling virtblk_add_req() here to avoid acquiring and
releasing the lock multiple times? In other words, do virtblk_prep_rq()
but wait until we get here to do virtblk_add_req().

I don't know if it has any measurable effect on performance or maybe the
code would become too complex, but I noticed that we're not fully
exploiting batching.
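
Something like this untested sketch, assuming the sg count from
virtblk_prep_rq() is stashed in the request pdu (e.g. a hypothetical
vbr->sg_num field) so that it is still available here:

  if (!next || req->mq_hctx != next->mq_hctx) {
  	struct request *rq;

  	req->rq_next = NULL;	/* cut the sublist for this hctx */

  	spin_lock_irqsave(&vq->lock, flags);
  	/* add the whole prepared sublist under one lock acquisition */
  	while ((rq = rq_list_pop(rqlist))) {
  		struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

  		if (virtblk_add_req(vq->vq, vbr, vbr->sg_table.sgl,
  				    vbr->sg_num)) {
  			virtblk_unmap_data(rq, vbr);
  			virtblk_cleanup_cmd(rq);
  			rq_list_add(&requeue_list, rq);
  		}
  	}
  	kick = virtqueue_kick_prepare(vq->vq);
  	spin_unlock_irqrestore(&vq->lock, flags);
  	if (kick)
  		virtqueue_notify(vq->vq);

  	*rqlist = next;
  	prev = NULL;
  }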

> +			kick = virtqueue_kick_prepare(vq->vq);
> +			spin_unlock_irqrestore(&vq->lock, flags);
> +			if (kick)
> +				virtqueue_notify(vq->vq);
> +
> +			req->rq_next = NULL;
> +			*rqlist = next;
> +			prev = NULL;
> +		} else
> +			prev = req;

What guarantees that req is still alive after we called
virtblk_add_req()? The device may have seen it and completed it already
by the time we get here.
Suwan Kim March 28, 2022, 3:50 p.m. UTC | #2
On Mon, Mar 28, 2022 at 02:16:13PM +0100, Stefan Hajnoczi wrote:
> On Thu, Mar 24, 2022 at 11:04:50PM +0900, Suwan Kim wrote:
> > @@ -367,6 +381,66 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
> >  	return BLK_STS_OK;
> >  }
> >  
> > +static bool virtblk_prep_rq_batch(struct virtio_blk_vq *vq, struct request *req)
> > +{
> > +	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
> > +	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
> > +	unsigned long flags;
> > +	int num, err;
> > +
> > +	req->mq_hctx->tags->rqs[req->tag] = req;
> > +
> > +	if (virtblk_prep_rq(req->mq_hctx, vblk, req, vbr, &num) != BLK_STS_OK)
> > +		return false;
> > +
> > +	spin_lock_irqsave(&vq->lock, flags);
> > +	err = virtblk_add_req(vq->vq, vbr, vbr->sg_table.sgl, num);
> > +	if (err) {
> > +		spin_unlock_irqrestore(&vq->lock, flags);
> > +		virtblk_unmap_data(req, vbr);
> > +		virtblk_cleanup_cmd(req);
> > +		return false;
> > +	}
> > +	spin_unlock_irqrestore(&vq->lock, flags);
> 
> Simplification:
> 
>   spin_lock_irqsave(&vq->lock, flags);
>   err = virtblk_add_req(vq->vq, vbr, vbr->sg_table.sgl, num);
>   spin_unlock_irqrestore(&vq->lock, flags);
>   if (err) {
>       virtblk_unmap_data(req, vbr);
>       virtblk_cleanup_cmd(req);
>       return false;
>   }
> 

Thanks! I will fix it.

> > +
> > +	return true;
> > +}
> > +
> > +static void virtio_queue_rqs(struct request **rqlist)
> > +{
> > +	struct request *req, *next, *prev = NULL;
> > +	struct request *requeue_list = NULL;
> > +
> > +	rq_list_for_each_safe(rqlist, req, next) {
> > +		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
> > +		unsigned long flags;
> > +		bool kick;
> > +
> > +		if (!virtblk_prep_rq_batch(vq, req)) {
> > +			rq_list_move(rqlist, &requeue_list, req, prev);
> > +			req = prev;
> > +
> > +			if (!req)
> > +				continue;
> > +		}
> > +
> > +		if (!next || req->mq_hctx != next->mq_hctx) {
> > +			spin_lock_irqsave(&vq->lock, flags);
> 
> Did you try calling virtblk_add_req() here to avoid acquiring and
> releasing the lock multiple times? In other words, do virtblk_prep_rq()
> but wait until we get here to do virtblk_add_req().
> 
> I don't know if it has any measurable effect on performance or maybe the
> code would become too complex, but I noticed that we're not fully
> exploiting batching.

I tried what you said: I called virtblk_add_req() and added the
requests on rqlist to the virtqueue inside this if statement, holding
the lock only once.

I attach the code at the end of this mail; please refer to it.

But I didn't see an improvement. It showed slightly worse performance
than the current patch.

> > +			kick = virtqueue_kick_prepare(vq->vq);
> > +			spin_unlock_irqrestore(&vq->lock, flags);
> > +			if (kick)
> > +				virtqueue_notify(vq->vq);
> > +
> > +			req->rq_next = NULL;

Did you mean this part?

> > +			*rqlist = next;
> > +			prev = NULL;
> > +		} else
> > +			prev = req;
> 
> What guarantees that req is still alive after we called
> virtblk_add_req()? The device may have seen it and completed it already
> by the time we get here.

Isn't the request completed after the kick?

If you were asking about "req->rq_next = NULL",
I think it should be placed before
"kick = virtqueue_kick_prepare(vq->vq);"

-----------
	req->rq_next = NULL;
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->lock, flags);
	if (kick)
		virtqueue_notify(vq->vq);
-----------

Regards,
Suwan Kim

---

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2218cab39c72..d972d3042068 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -92,6 +92,7 @@ struct virtio_blk {
 struct virtblk_req {
 	struct virtio_blk_outhdr out_hdr;
 	u8 status;
+	int sg_num;
 	struct sg_table sg_table;
 	struct scatterlist sg[];
 };
@@ -311,18 +312,13 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
 		virtqueue_notify(vq->vq);
 }

-static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
-			   const struct blk_mq_queue_data *bd)
+static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
+					struct virtio_blk *vblk,
+					struct request *req,
+					struct virtblk_req *vbr)
 {
-	struct virtio_blk *vblk = hctx->queue->queuedata;
-	struct request *req = bd->rq;
-	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
-	unsigned long flags;
-	int num;
-	int qid = hctx->queue_num;
-	bool notify = false;
 	blk_status_t status;
-	int err;
+	int num;

 	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
 	if (unlikely(status))
@@ -335,9 +331,30 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 		virtblk_cleanup_cmd(req);
 		return BLK_STS_RESOURCE;
 	}
+	vbr->sg_num = num;
+
+	return BLK_STS_OK;
+}
+
+static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+			   const struct blk_mq_queue_data *bd)
+{
+	struct virtio_blk *vblk = hctx->queue->queuedata;
+	struct request *req = bd->rq;
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+	unsigned long flags;
+	int qid = hctx->queue_num;
+	bool notify = false;
+	blk_status_t status;
+	int err;
+
+	status = virtblk_prep_rq(hctx, vblk, req, vbr);
+	if (unlikely(status))
+		return status;

 	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
-	err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
+	err = virtblk_add_req(vblk->vqs[qid].vq, vbr,
+				vbr->sg_table.sgl, vbr->sg_num);
 	if (err) {
 		virtqueue_kick(vblk->vqs[qid].vq);
 		/* Don't stop the queue if -ENOMEM: we may have failed to
@@ -367,6 +384,76 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }

+static bool virtblk_prep_rq_batch(struct virtio_blk_vq *vq, struct request *req)
+{
+	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+	req->mq_hctx->tags->rqs[req->tag] = req;
+
+	return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
+}
+
+static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
+					struct request **rqlist,
+					struct request **requeue_list)
+{
+	unsigned long flags;
+	int err;
+	bool kick;
+
+	spin_lock_irqsave(&vq->lock, flags);
+	while (!rq_list_empty(*rqlist)) {
+		struct request *req = rq_list_pop(rqlist);
+		struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+		err = virtblk_add_req(vq->vq, vbr,
+					vbr->sg_table.sgl, vbr->sg_num);
+		if (err) {
+			virtblk_unmap_data(req, vbr);
+			virtblk_cleanup_cmd(req);
+			rq_list_add(requeue_list, req);
+		}
+	}
+
+	kick = virtqueue_kick_prepare(vq->vq);
+	spin_unlock_irqrestore(&vq->lock, flags);
+
+	return kick;
+}
+
+static void virtio_queue_rqs(struct request **rqlist)
+{
+	struct request *req, *next, *prev = NULL;
+	struct request *requeue_list = NULL;
+
+	rq_list_for_each_safe(rqlist, req, next) {
+		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+		bool kick;
+
+		if (!virtblk_prep_rq_batch(vq, req)) {
+			rq_list_move(rqlist, &requeue_list, req, prev);
+			req = prev;
+
+			if (!req)
+				continue;
+		}
+
+		if (!next || req->mq_hctx != next->mq_hctx) {
+			kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
+			if (kick)
+				virtqueue_notify(vq->vq);
+
+			req->rq_next = NULL;
+			*rqlist = next;
+			prev = NULL;
+		} else
+			prev = req;
+	}
+
+	*rqlist = requeue_list;
+}
+
 /* return id (s/n) string for *disk to *id_str
  */
 static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -818,6 +905,7 @@ static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,

 static const struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
+	.queue_rqs	= virtio_queue_rqs,
 	.commit_rqs	= virtio_commit_rqs,
 	.init_hctx	= virtblk_init_hctx,
 	.complete	= virtblk_request_done,
Stefan Hajnoczi March 29, 2022, 8:45 a.m. UTC | #3
On Tue, Mar 29, 2022 at 12:50:33AM +0900, Suwan Kim wrote:
> On Mon, Mar 28, 2022 at 02:16:13PM +0100, Stefan Hajnoczi wrote:
> > On Thu, Mar 24, 2022 at 11:04:50PM +0900, Suwan Kim wrote:
> > > +static void virtio_queue_rqs(struct request **rqlist)
> > > +{
> > > +	struct request *req, *next, *prev = NULL;
> > > +	struct request *requeue_list = NULL;
> > > +
> > > +	rq_list_for_each_safe(rqlist, req, next) {
> > > +		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
> > > +		unsigned long flags;
> > > +		bool kick;
> > > +
> > > +		if (!virtblk_prep_rq_batch(vq, req)) {
> > > +			rq_list_move(rqlist, &requeue_list, req, prev);
> > > +			req = prev;
> > > +
> > > +			if (!req)
> > > +				continue;
> > > +		}
> > > +
> > > +		if (!next || req->mq_hctx != next->mq_hctx) {
> > > +			spin_lock_irqsave(&vq->lock, flags);
> > 
> > Did you try calling virtblk_add_req() here to avoid acquiring and
> > releasing the lock multiple times? In other words, do virtblk_prep_rq()
> > but wait until we get here to do virtblk_add_req().
> > 
> > I don't know if it has any measurable effect on performance or maybe the
> > code would become too complex, but I noticed that we're not fully
> > exploiting batching.
> 
> I tried what you said: I called virtblk_add_req() and added the
> requests on rqlist to the virtqueue inside this if statement, holding
> the lock only once.
> 
> I attach the code at the end of this mail; please refer to it.
> 
> But I didn't see an improvement. It showed slightly worse performance
> than the current patch.

Okay, thanks for trying it!

> > > +			kick = virtqueue_kick_prepare(vq->vq);
> > > +			spin_unlock_irqrestore(&vq->lock, flags);
> > > +			if (kick)
> > > +				virtqueue_notify(vq->vq);
> > > +
> > > +			req->rq_next = NULL;
> 
> Did you mean this part?
> 
> > > +			*rqlist = next;
> > > +			prev = NULL;
> > > +		} else
> > > +			prev = req;
> > 
> > What guarantees that req is still alive after we called
> > virtblk_add_req()? The device may have seen it and completed it already
> > by the time we get here.
> 
> Isn't the request completed after the kick?
> 
> If you were asking about "req->rq_next = NULL",
> I think it should be placed before
> "kick = virtqueue_kick_prepare(vq->vq);"
> 
> -----------
> 	req->rq_next = NULL;
> 	kick = virtqueue_kick_prepare(vq->vq);
> 	spin_unlock_irqrestore(&vq->lock, flags);
> 	if (kick)
> 		virtqueue_notify(vq->vq);
> -----------

No, virtqueue_add_sgs() exposes vring descriptors to the device. The
device may process immediately. In other words, VIRTIO devices may poll
the vring instead of waiting for virtqueue_notify(). There is no
guarantee that the request is alive until virtqueue_notify() is called.

The code has to handle the case where the request is completed during
virtqueue_add_sgs().
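
For example, compressing the relevant lines of the current patch into
one sketch:

  err = virtblk_add_req(vq->vq, vbr, vbr->sg_table.sgl, num);
  /* From this point on the descriptors are visible to the device,
   * and a polling device may complete the request at any time. */

  kick = virtqueue_kick_prepare(vq->vq);
  if (kick)
  	virtqueue_notify(vq->vq);

  req->rq_next = NULL;	/* too late: req may already be recycled */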

Stefan
Suwan Kim March 29, 2022, 1:48 p.m. UTC | #4
On Tue, Mar 29, 2022 at 09:45:29AM +0100, Stefan Hajnoczi wrote:
> On Tue, Mar 29, 2022 at 12:50:33AM +0900, Suwan Kim wrote:
> > On Mon, Mar 28, 2022 at 02:16:13PM +0100, Stefan Hajnoczi wrote:
> > > On Thu, Mar 24, 2022 at 11:04:50PM +0900, Suwan Kim wrote:
> > > > +static void virtio_queue_rqs(struct request **rqlist)
> > > > +{
> > > > +	struct request *req, *next, *prev = NULL;
> > > > +	struct request *requeue_list = NULL;
> > > > +
> > > > +	rq_list_for_each_safe(rqlist, req, next) {
> > > > +		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
> > > > +		unsigned long flags;
> > > > +		bool kick;
> > > > +
> > > > +		if (!virtblk_prep_rq_batch(vq, req)) {
> > > > +			rq_list_move(rqlist, &requeue_list, req, prev);
> > > > +			req = prev;
> > > > +
> > > > +			if (!req)
> > > > +				continue;
> > > > +		}
> > > > +
> > > > +		if (!next || req->mq_hctx != next->mq_hctx) {
> > > > +			spin_lock_irqsave(&vq->lock, flags);
> > > 
> > > Did you try calling virtblk_add_req() here to avoid acquiring and
> > > releasing the lock multiple times? In other words, do virtblk_prep_rq()
> > > but wait until we get here to do virtblk_add_req().
> > > 
> > > I don't know if it has any measurable effect on performance or maybe the
> > > code would become too complex, but I noticed that we're not fully
> > > exploiting batching.
> > 
> > I tried what you said: I called virtblk_add_req() and added the
> > requests on rqlist to the virtqueue inside this if statement, holding
> > the lock only once.
> > 
> > I attach the code at the end of this mail; please refer to it.
> > 
> > But I didn't see an improvement. It showed slightly worse performance
> > than the current patch.
> 
> Okay, thanks for trying it!
> 
> > > > +			kick = virtqueue_kick_prepare(vq->vq);
> > > > +			spin_unlock_irqrestore(&vq->lock, flags);
> > > > +			if (kick)
> > > > +				virtqueue_notify(vq->vq);
> > > > +
> > > > +			req->rq_next = NULL;
> > 
> > Did you mean this part?
> > 
> > > > +			*rqlist = next;
> > > > +			prev = NULL;
> > > > +		} else
> > > > +			prev = req;
> > > 
> > > What guarantees that req is still alive after we called
> > > virtblk_add_req()? The device may have seen it and completed it already
> > > by the time we get here.
> > 
> > Isn't the request completed after the kick?
> > 
> > If you were asking about "req->rq_next = NULL",
> > I think it should be placed before
> > "kick = virtqueue_kick_prepare(vq->vq);"
> > 
> > -----------
> > 	req->rq_next = NULL;
> > 	kick = virtqueue_kick_prepare(vq->vq);
> > 	spin_unlock_irqrestore(&vq->lock, flags);
> > 	if (kick)
> > 		virtqueue_notify(vq->vq);
> > -----------
> 
> No, virtqueue_add_sgs() exposes vring descriptors to the device. The
> device may process immediately. In other words, VIRTIO devices may poll
> the vring instead of waiting for virtqueue_notify(). There is no
> guarantee that the request is alive until virtqueue_notify() is called.
> 
> The code has to handle the case where the request is completed during
> virtqueue_add_sgs().

Thanks for the explanation.

We should not use req again after virtblk_add_req().
I understand...

Then, as you suggested in your previous mail, is it OK to do
virtblk_add_req() inside the "if (!next || req->mq_hctx != next->mq_hctx)"
statement, so that req is not used again after virtblk_add_req(), as in
the code below?

In this code, requests are added to the virtqueue in a batch just
before virtqueue_notify(), and req is not used again after calling
virtblk_add_req().

If that is fine, I will try it again.
This code is slightly different from the code I sent in the previous mail.

---
static void virtio_queue_rqs(struct request **rqlist)
...
	rq_list_for_each_safe(rqlist, req, next) {
...
		if (!next || req->mq_hctx != next->mq_hctx) {
			// Cut the list at current req
			req->rq_next = NULL;
			// Add req list to virtqueue in batch with holding lock once
			kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
			if (kick)
				virtqueue_notify(vq->vq);

			// setup new req list. Don't use previous req again.
			*rqlist = next;
			prev = NULL;
...
---

Regards,
Suwan Kim
Stefan Hajnoczi March 29, 2022, 3:01 p.m. UTC | #5
On Tue, Mar 29, 2022 at 10:48:16PM +0900, Suwan Kim wrote:
> On Tue, Mar 29, 2022 at 09:45:29AM +0100, Stefan Hajnoczi wrote:
> > On Tue, Mar 29, 2022 at 12:50:33AM +0900, Suwan Kim wrote:
> > > On Mon, Mar 28, 2022 at 02:16:13PM +0100, Stefan Hajnoczi wrote:
> > > > On Thu, Mar 24, 2022 at 11:04:50PM +0900, Suwan Kim wrote:
> > > > > +static void virtio_queue_rqs(struct request **rqlist)
> > > > > +{
> > > > > +	struct request *req, *next, *prev = NULL;
> > > > > +	struct request *requeue_list = NULL;
> > > > > +
> > > > > +	rq_list_for_each_safe(rqlist, req, next) {
> > > > > +		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
> > > > > +		unsigned long flags;
> > > > > +		bool kick;
> > > > > +
> > > > > +		if (!virtblk_prep_rq_batch(vq, req)) {
> > > > > +			rq_list_move(rqlist, &requeue_list, req, prev);
> > > > > +			req = prev;
> > > > > +
> > > > > +			if (!req)
> > > > > +				continue;
> > > > > +		}
> > > > > +
> > > > > +		if (!next || req->mq_hctx != next->mq_hctx) {
> > > > > +			spin_lock_irqsave(&vq->lock, flags);
> > > > 
> > > > Did you try calling virtblk_add_req() here to avoid acquiring and
> > > > releasing the lock multiple times? In other words, do virtblk_prep_rq()
> > > > but wait until we get here to do virtblk_add_req().
> > > > 
> > > > I don't know if it has any measurable effect on performance or maybe the
> > > > code would become too complex, but I noticed that we're not fully
> > > > exploiting batching.
> > > 
> > > I tried what you said: I called virtblk_add_req() and added the
> > > requests on rqlist to the virtqueue inside this if statement, holding
> > > the lock only once.
> > > 
> > > I attach the code at the end of this mail; please refer to it.
> > > 
> > > But I didn't see an improvement. It showed slightly worse performance
> > > than the current patch.
> > 
> > Okay, thanks for trying it!
> > 
> > > > > +			kick = virtqueue_kick_prepare(vq->vq);
> > > > > +			spin_unlock_irqrestore(&vq->lock, flags);
> > > > > +			if (kick)
> > > > > +				virtqueue_notify(vq->vq);
> > > > > +
> > > > > +			req->rq_next = NULL;
> > > 
> > > Did you mean this part?
> > > 
> > > > > +			*rqlist = next;
> > > > > +			prev = NULL;
> > > > > +		} else
> > > > > +			prev = req;
> > > > 
> > > > What guarantees that req is still alive after we called
> > > > virtblk_add_req()? The device may have seen it and completed it already
> > > > by the time we get here.
> > > 
> > > Isn't the request completed after the kick?
> > > 
> > > If you were asking about "req->rq_next = NULL",
> > > I think it should be placed before
> > > "kick = virtqueue_kick_prepare(vq->vq);"
> > > 
> > > -----------
> > > 	req->rq_next = NULL;
> > > 	kick = virtqueue_kick_prepare(vq->vq);
> > > 	spin_unlock_irqrestore(&vq->lock, flags);
> > > 	if (kick)
> > > 		virtqueue_notify(vq->vq);
> > > -----------
> > 
> > No, virtqueue_add_sgs() exposes vring descriptors to the device. The
> > device may process immediately. In other words, VIRTIO devices may poll
> > the vring instead of waiting for virtqueue_notify(). There is no
> > guarantee that the request is alive until virtqueue_notify() is called.
> > 
> > The code has to handle the case where the request is completed during
> > virtqueue_add_sgs().
> 
> Thanks for the explanation.
> 
> We should not use req again after virtblk_add_req().
> I understand...
> 
> Then, as you suggested in your previous mail, is it OK to do
> virtblk_add_req() inside the "if (!next || req->mq_hctx != next->mq_hctx)"
> statement, so that req is not used again after virtblk_add_req(), as in
> the code below?
> 
> In this code, requests are added to the virtqueue in a batch just
> before virtqueue_notify(), and req is not used again after calling
> virtblk_add_req().
> 
> If that is fine, I will try it again.
> This code is slightly different from the code I sent in the previous mail.
> 
> ---
> static void virtio_queue_rqs(struct request **rqlist)
> ...
> 	rq_list_for_each_safe(rqlist, req, next) {
> ...
> 		if (!next || req->mq_hctx != next->mq_hctx) {
> 			// Cut the list at current req
> 			req->rq_next = NULL;
> 			// Add req list to virtqueue in batch with holding lock once
> 			kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
> 			if (kick)
> 				virtqueue_notify(vq->vq);
> 
> 			// setup new req list. Don't use previous req again.
> 			*rqlist = next;
> 			prev = NULL;
> ...

Yes, that sounds good.

(I noticed struct request has a reference count so that might be a way
to keep requests alive, if necessary, but I haven't investigated. See
req_ref_put_and_test() though it's not used by block drivers and maybe
virtio-blk shouldn't mess with it either.)

Stefan
Suwan Kim March 29, 2022, 3:54 p.m. UTC | #6
On Tue, Mar 29, 2022 at 04:01:46PM +0100, Stefan Hajnoczi wrote:
> On Tue, Mar 29, 2022 at 10:48:16PM +0900, Suwan Kim wrote:
> > On Tue, Mar 29, 2022 at 09:45:29AM +0100, Stefan Hajnoczi wrote:
> > > On Tue, Mar 29, 2022 at 12:50:33AM +0900, Suwan Kim wrote:
> > > > On Mon, Mar 28, 2022 at 02:16:13PM +0100, Stefan Hajnoczi wrote:
> > > > > On Thu, Mar 24, 2022 at 11:04:50PM +0900, Suwan Kim wrote:
> > > > > > +static void virtio_queue_rqs(struct request **rqlist)
> > > > > > +{
> > > > > > +	struct request *req, *next, *prev = NULL;
> > > > > > +	struct request *requeue_list = NULL;
> > > > > > +
> > > > > > +	rq_list_for_each_safe(rqlist, req, next) {
> > > > > > +		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
> > > > > > +		unsigned long flags;
> > > > > > +		bool kick;
> > > > > > +
> > > > > > +		if (!virtblk_prep_rq_batch(vq, req)) {
> > > > > > +			rq_list_move(rqlist, &requeue_list, req, prev);
> > > > > > +			req = prev;
> > > > > > +
> > > > > > +			if (!req)
> > > > > > +				continue;
> > > > > > +		}
> > > > > > +
> > > > > > +		if (!next || req->mq_hctx != next->mq_hctx) {
> > > > > > +			spin_lock_irqsave(&vq->lock, flags);
> > > > > 
> > > > > Did you try calling virtblk_add_req() here to avoid acquiring and
> > > > > releasing the lock multiple times? In other words, do virtblk_prep_rq()
> > > > > but wait until we get here to do virtblk_add_req().
> > > > > 
> > > > > I don't know if it has any measurable effect on performance or maybe the
> > > > > code would become too complex, but I noticed that we're not fully
> > > > > exploiting batching.
> > > > 
> > > > I tried what you said: I called virtblk_add_req() and added the
> > > > requests on rqlist to the virtqueue inside this if statement, holding
> > > > the lock only once.
> > > > 
> > > > I attach the code at the end of this mail; please refer to it.
> > > > 
> > > > But I didn't see an improvement. It showed slightly worse performance
> > > > than the current patch.
> > > 
> > > Okay, thanks for trying it!
> > > 
> > > > > > +			kick = virtqueue_kick_prepare(vq->vq);
> > > > > > +			spin_unlock_irqrestore(&vq->lock, flags);
> > > > > > +			if (kick)
> > > > > > +				virtqueue_notify(vq->vq);
> > > > > > +
> > > > > > +			req->rq_next = NULL;
> > > > 
> > > > Did you mean this part?
> > > > 
> > > > > > +			*rqlist = next;
> > > > > > +			prev = NULL;
> > > > > > +		} else
> > > > > > +			prev = req;
> > > > > 
> > > > > What guarantees that req is still alive after we called
> > > > > virtblk_add_req()? The device may have seen it and completed it already
> > > > > by the time we get here.
> > > > 
> > > > Isn't the request completed after the kick?
> > > > 
> > > > If you were asking about "req->rq_next = NULL",
> > > > I think it should be placed before
> > > > "kick = virtqueue_kick_prepare(vq->vq);"
> > > > 
> > > > -----------
> > > > 	req->rq_next = NULL;
> > > > 	kick = virtqueue_kick_prepare(vq->vq);
> > > > 	spin_unlock_irqrestore(&vq->lock, flags);
> > > > 	if (kick)
> > > > 		virtqueue_notify(vq->vq);
> > > > -----------
> > > 
> > > No, virtqueue_add_sgs() exposes vring descriptors to the device. The
> > > device may process immediately. In other words, VIRTIO devices may poll
> > > the vring instead of waiting for virtqueue_notify(). There is no
> > > guarantee that the request is alive until virtqueue_notify() is called.
> > > 
> > > The code has to handle the case where the request is completed during
> > > virtqueue_add_sgs().
> > 
> > Thanks for the explanation.
> > 
> > We should not use req again after virtblk_add_req().
> > I understand...
> > 
> > Then, as you suggested in your previous mail, is it OK to do
> > virtblk_add_req() inside the "if (!next || req->mq_hctx != next->mq_hctx)"
> > statement, so that req is not used again after virtblk_add_req(), as in
> > the code below?
> > 
> > In this code, requests are added to the virtqueue in a batch just
> > before virtqueue_notify(), and req is not used again after calling
> > virtblk_add_req().
> > 
> > If that is fine, I will try it again.
> > This code is slightly different from the code I sent in the previous mail.
> > 
> > ---
> > static void virtio_queue_rqs(struct request **rqlist)
> > ...
> > 	rq_list_for_each_safe(rqlist, req, next) {
> > ...
> > 		if (!next || req->mq_hctx != next->mq_hctx) {
> > 			// Cut the list at current req
> > 			req->rq_next = NULL;
> > 			// Add req list to virtqueue in batch with holding lock once
> > 			kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
> > 			if (kick)
> > 				virtqueue_notify(vq->vq);
> > 
> > 			// setup new req list. Don't use previous req again.
> > 			*rqlist = next;
> > 			prev = NULL;
> > ...
> 
> Yes, that sounds good.
> 
> (I noticed struct request has a reference count so that might be a way
> to keep requests alive, if necessary, but I haven't investigated. See
> req_ref_put_and_test() though it's not used by block drivers and maybe
> virtio-blk shouldn't mess with it either.)

I also think that using a ref count is not a good idea.
I will send the next version soon.

Regards,
Suwan Kim

Patch

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 3d16f8b753e7..4a0a3b2d9caf 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -311,6 +311,28 @@  static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
 		virtqueue_notify(vq->vq);
 }
 
+static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
+					struct virtio_blk *vblk,
+					struct request *req,
+					struct virtblk_req *vbr, int *num)
+{
+	blk_status_t status;
+
+	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
+	if (unlikely(status))
+		return status;
+
+	blk_mq_start_request(req);
+
+	*num = virtblk_map_data(hctx, req, vbr);
+	if (unlikely(*num < 0)) {
+		virtblk_cleanup_cmd(req);
+		return BLK_STS_RESOURCE;
+	}
+
+	return BLK_STS_OK;
+}
+
 static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 			   const struct blk_mq_queue_data *bd)
 {
@@ -324,18 +346,10 @@  static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_status_t status;
 	int err;
 
-	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
+	status = virtblk_prep_rq(hctx, vblk, req, vbr, &num);
 	if (unlikely(status))
 		return status;
 
-	blk_mq_start_request(req);
-
-	num = virtblk_map_data(hctx, req, vbr);
-	if (unlikely(num < 0)) {
-		virtblk_cleanup_cmd(req);
-		return BLK_STS_RESOURCE;
-	}
-
 	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
 	err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
 	if (err) {
@@ -367,6 +381,66 @@  static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
+static bool virtblk_prep_rq_batch(struct virtio_blk_vq *vq, struct request *req)
+{
+	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+	unsigned long flags;
+	int num, err;
+
+	req->mq_hctx->tags->rqs[req->tag] = req;
+
+	if (virtblk_prep_rq(req->mq_hctx, vblk, req, vbr, &num) != BLK_STS_OK)
+		return false;
+
+	spin_lock_irqsave(&vq->lock, flags);
+	err = virtblk_add_req(vq->vq, vbr, vbr->sg_table.sgl, num);
+	if (err) {
+		spin_unlock_irqrestore(&vq->lock, flags);
+		virtblk_unmap_data(req, vbr);
+		virtblk_cleanup_cmd(req);
+		return false;
+	}
+	spin_unlock_irqrestore(&vq->lock, flags);
+
+	return true;
+}
+
+static void virtio_queue_rqs(struct request **rqlist)
+{
+	struct request *req, *next, *prev = NULL;
+	struct request *requeue_list = NULL;
+
+	rq_list_for_each_safe(rqlist, req, next) {
+		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+		unsigned long flags;
+		bool kick;
+
+		if (!virtblk_prep_rq_batch(vq, req)) {
+			rq_list_move(rqlist, &requeue_list, req, prev);
+			req = prev;
+
+			if (!req)
+				continue;
+		}
+
+		if (!next || req->mq_hctx != next->mq_hctx) {
+			spin_lock_irqsave(&vq->lock, flags);
+			kick = virtqueue_kick_prepare(vq->vq);
+			spin_unlock_irqrestore(&vq->lock, flags);
+			if (kick)
+				virtqueue_notify(vq->vq);
+
+			req->rq_next = NULL;
+			*rqlist = next;
+			prev = NULL;
+		} else
+			prev = req;
+	}
+
+	*rqlist = requeue_list;
+}
+
 /* return id (s/n) string for *disk to *id_str
  */
 static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -823,6 +897,7 @@  static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 
 static const struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
+	.queue_rqs	= virtio_queue_rqs,
 	.commit_rqs	= virtio_commit_rqs,
 	.init_hctx	= virtblk_init_hctx,
 	.complete	= virtblk_request_done,