[V2,14/17] block: ublk_drv: support to copy any part of request pages

Message ID 20230307141520.793891-15-ming.lei@redhat.com (mailing list archive)
State New, archived
Series io_uring/ublk: add IORING_OP_FUSED_CMD

Commit Message

Ming Lei March 7, 2023, 2:15 p.m. UTC
Add an 'offset' parameter to ublk_copy_user_pages(), so that it can be
used to copy any sub-buffer (linearly mapped) of the request.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 drivers/block/ublk_drv.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)
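
For illustration only (not part of this patch): a minimal sketch of how a
caller could use the new 'offset' argument to copy just a sub-range of a
request to a user buffer. The helper name ublk_copy_req_range_to_user() is
hypothetical; import_single_range() and ublk_copy_user_pages() are the
interfaces visible in the diff below, and ITER_DEST is the standard iov_iter
direction constant.

/*
 * Hypothetical example: copy 'len' bytes of request data, starting
 * 'offset' bytes into the request, to a user buffer.  Builds on the
 * new 'offset' argument of ublk_copy_user_pages().
 */
static size_t ublk_copy_req_range_to_user(const struct request *req,
		void __user *ubuf, unsigned int offset, unsigned int len)
{
	struct iov_iter iter;
	struct iovec iov;

	/* Build a single-segment iov_iter over the destination user buffer */
	import_single_range(ITER_DEST, ubuf, len, &iov, &iter);

	/* Skip 'offset' bytes of the request, then copy up to 'len' bytes */
	return ublk_copy_user_pages(req, offset, &iter, ITER_DEST);
}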

Patch

diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index ba252932951c..c0cb99cb0c3c 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -511,19 +511,37 @@  static void ublk_copy_io_pages(struct ublk_io_iter *data,
 	}
 }
 
+static bool ublk_advance_io_iter(struct ublk_io_iter *iter, unsigned int offset)
+{
+	struct bio *bio = iter->bio;
+
+	for_each_bio(bio) {
+		if (bio->bi_iter.bi_size > offset) {
+			iter->bio = bio;
+			iter->iter = bio->bi_iter;
+			bio_advance_iter(iter->bio, &iter->iter, offset);
+			return true;
+		}
+		offset -= bio->bi_iter.bi_size;
+	}
+	return false;
+}
+
 /*
  * Copy data between request pages and io_iter, and 'offset'
  * is the start point of linear offset of request.
  */
 static size_t ublk_copy_user_pages(const struct request *req,
-		struct iov_iter *uiter, int dir)
+		unsigned offset, struct iov_iter *uiter, int dir)
 {
 	struct ublk_io_iter iter = {
 		.bio	= req->bio,
-		.iter	= req->bio->bi_iter,
 	};
 	size_t done = 0;
 
+	if (!ublk_advance_io_iter(&iter, offset))
+		return 0;
+
 	while (iov_iter_count(uiter) && iter.bio) {
 		unsigned nr_pages;
 		size_t len, off;
@@ -576,7 +594,7 @@  static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
 		import_single_range(dir, (void __user *)io->addr,
 				rq_bytes, &iov, &iter);
 
-		return ublk_copy_user_pages(req, &iter, dir);
+		return ublk_copy_user_pages(req, 0, &iter, dir);
 	}
 	return rq_bytes;
 }
@@ -596,7 +614,7 @@  static int ublk_unmap_io(const struct ublk_queue *ubq,
 
 		import_single_range(dir, (void __user *)io->addr,
 				io->res, &iov, &iter);
-		return ublk_copy_user_pages(req, &iter, dir);
+		return ublk_copy_user_pages(req, 0, &iter, dir);
 	}
 	return rq_bytes;
 }
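
As an aside, the offset lookup in ublk_advance_io_iter() follows a common
pattern: walk a chain of segments, subtracting each segment's size from the
offset until the offset lands inside one. A simplified userspace analog is
below (illustrative only; 'struct seg' and find_seg() are made up for this
sketch); the kernel version additionally uses bio_advance_iter() to position
the bvec iterator inside the bio it finds.

#include <stddef.h>

/* Minimal stand-in for a chained buffer list such as a bio chain */
struct seg {
	struct seg *next;
	size_t size;
};

/*
 * Return the segment containing linear 'offset' and rewrite *offset to
 * the offset within that segment; NULL if 'offset' is past the end.
 * Mirrors the loop in ublk_advance_io_iter().
 */
static struct seg *find_seg(struct seg *head, size_t *offset)
{
	for (struct seg *s = head; s; s = s->next) {
		if (s->size > *offset)
			return s;
		*offset -= s->size;
	}
	return NULL;
}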