[RFC,v2,19/21] nvme-pci: precalculate number of DMA entries for each command

Message ID 8c5b0e5ab1716166fc93e76cb2d3e01ca9cf8769.1726138681.git.leon@kernel.org
State Handled Elsewhere
Series Provide a new two step DMA API mapping API

Commit Message

Leon Romanovsky Sept. 12, 2024, 11:15 a.m. UTC
From: Leon Romanovsky <leonro@nvidia.com>

Calculate in advance the number of DMA entries needed for each command
in the request, so the count is known before the data is mapped.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 drivers/nvme/host/pci.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)
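
To make the arithmetic concrete, here is a minimal standalone sketch of
the same calculation. It assumes a single-segment request (so the first
bio_vec covers the whole payload) and a 4 KiB controller page; the names
CTRL_PAGE_SIZE and calc_num_dmas are invented for this illustration,
while the driver itself uses NVME_CTRL_PAGE_SIZE and the block-layer
accessors shown in the diff below.

#include <stdio.h>

/* Assumed 4 KiB controller page; the driver uses NVME_CTRL_PAGE_SIZE. */
#define CTRL_PAGE_SIZE 4096U

/* Same rounding helper the kernel provides as DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * Mirror of the patch's logic for a single-segment request: round the
 * payload up to whole controller pages, then add one entry when an
 * unaligned start pushes the transfer across an additional page.
 */
static unsigned int calc_num_dmas(unsigned int payload_bytes,
				  unsigned int first_offset)
{
	unsigned int nr_dmas;

	if (payload_bytes == 0)
		return 0;

	nr_dmas = DIV_ROUND_UP(payload_bytes, CTRL_PAGE_SIZE);
	if (first_offset && first_offset + payload_bytes >= CTRL_PAGE_SIZE)
		nr_dmas++;	/* unaligned first page */

	return nr_dmas;
}

int main(void)
{
	/* 8 KiB at offset 512 touches pages 0, 1 and 2, so 3 entries,
	 * although 8192 / 4096 rounds up to only 2. */
	printf("%u\n", calc_num_dmas(8192, 512));	/* prints 3 */

	/* A page-aligned 8 KiB transfer needs exactly 2 entries. */
	printf("%u\n", calc_num_dmas(8192, 0));		/* prints 2 */
	return 0;
}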

Patch

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a9a66f184138..2b236b1d209e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -231,6 +231,7 @@ struct nvme_iod {
 	struct nvme_request req;
 	struct nvme_command cmd;
 	bool aborted;
+	u8 nr_dmas;
 	s8 nr_allocations;	/* PRP list pool allocations. 0 means small
 				   pool in use */
 	dma_addr_t first_dma;
@@ -766,6 +767,23 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
 	return BLK_STS_OK;
 }
 
+static u8 nvme_calc_num_dmas(struct request *req)
+{
+	struct bio_vec bv;
+	u8 nr_dmas;
+
+	if (blk_rq_nr_phys_segments(req) == 0)
+		return 0;
+
+	nr_dmas = DIV_ROUND_UP(blk_rq_payload_bytes(req), NVME_CTRL_PAGE_SIZE);
+	bv = req_bvec(req);
+	if (bv.bv_offset && (bv.bv_offset + bv.bv_len) >= NVME_CTRL_PAGE_SIZE)
+		/* Account for an unaligned first page */
+		nr_dmas++;
+
+	return nr_dmas;
+}
+
 static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -779,6 +797,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
 	if (ret)
 		return ret;
 
+	iod->nr_dmas = nvme_calc_num_dmas(req);
+
 	if (blk_rq_nr_phys_segments(req)) {
 		ret = nvme_map_data(dev, req, &iod->cmd);
 		if (ret)
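
The precalculation reads as a worst-case entry count: the byte rounding
alone covers page-aligned transfers, and the bv_offset test adds one
entry when an unaligned first segment makes the payload straddle an
extra controller page. At the exact boundary (bv_offset + bv_len ==
NVME_CTRL_PAGE_SIZE) the >= comparison counts one spare entry; that
slight overestimate is harmless for an upper bound, which presumably
feeds per-command DMA sizing in later patches of this series.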