
[v2,6/7] block: implement async write zeroes

Message ID: 09c5ef75c04c17ee2fd551da50fc9aae3bfce50a.1724297388.git.asml.silence@gmail.com
State: New, archived
Series: implement async block discards/etc. via io_uring

Commit Message

Pavel Begunkov Aug. 22, 2024, 3:35 a.m. UTC
Add another io_uring cmd for the block layer implementing asynchronous
write zeroes. It reuses the helpers added for async discards and
inherits the code structure as well as all considerations regarding
page cache races.

Suggested-by: Conrad Meyer <conradmeyer@meta.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 block/ioctl.c           | 68 +++++++++++++++++++++++++++++++++++++++++
 include/linux/blkdev.h  |  4 +++
 include/uapi/linux/fs.h |  1 +
 3 files changed, 73 insertions(+)
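
For readers unfamiliar with the io_uring cmd interface, below is a minimal
userspace sketch of how the new command might be driven through liburing.
The placement of the byte offset in sqe->addr and the byte length in
sqe->addr3 is an assumption carried over from the discard command earlier
in this series and is not defined by this patch; check that patch for the
authoritative SQE layout. The device fd must be opened with write access,
which blk_validate_write() enforces in the code below.

/*
 * Hedged example: issue BLOCK_URING_CMD_WRITE_ZEROES for a byte range
 * of an already opened block device. Assumes liburing and the uapi
 * header change from this series.
 */
#include <liburing.h>
#include <linux/fs.h>
#include <string.h>

static int zero_range(int bdev_fd, __u64 start, __u64 len)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret)
		return ret;

	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = bdev_fd;
	sqe->cmd_op = BLOCK_URING_CMD_WRITE_ZEROES;
	sqe->addr = start;	/* byte offset: assumed field */
	sqe->addr3 = len;	/* byte length: assumed field */

	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		/* 0 on success, negative errno otherwise */
		ret = cqe->res;
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return ret;
}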

Comments

Christoph Hellwig Aug. 22, 2024, 6:50 a.m. UTC | #1
On Thu, Aug 22, 2024 at 04:35:56AM +0100, Pavel Begunkov wrote:
> Add another io_uring cmd for the block layer implementing asynchronous
> write zeroes. It reuses the helpers added for async discards and
> inherits the code structure as well as all considerations regarding
> page cache races.

Most comments from discard apply here as well.

> +static int blkdev_queue_cmd(struct io_uring_cmd *cmd, struct block_device *bdev,
> +			    uint64_t start, uint64_t len, sector_t limit,
> +			    blk_opf_t opf)

This feels a little over-generic, as it doesn't queue just any random
command.

> +static int blkdev_cmd_write_zeroes(struct io_uring_cmd *cmd,
> +				   struct block_device *bdev,
> +				   uint64_t start, uint64_t len, bool nowait)
> +{
> +	blk_opf_t opf = REQ_OP_WRITE_ZEROES | REQ_NOUNMAP;
> +
> +	if (nowait)
> +		opf |= REQ_NOWAIT;
> +	return blkdev_queue_cmd(cmd, bdev, start, len,
> +				bdev_write_zeroes_sectors(bdev), opf);

So no support for a fallback to writing zero pages here? That's probably
the case where the async offload is needed most.

> +struct bio *blk_alloc_write_zeroes_bio(struct block_device *bdev,
> +					sector_t *sector, sector_t *nr_sects,
> +					gfp_t gfp_mask);

Please keep this in block/blk.h, no need to expose it to the entire
kernel.

Pavel Begunkov Aug. 22, 2024, 1:09 p.m. UTC | #2
On 8/22/24 07:50, Christoph Hellwig wrote:
> On Thu, Aug 22, 2024 at 04:35:56AM +0100, Pavel Begunkov wrote:
>> Add another io_uring cmd for the block layer implementing asynchronous
>> write zeroes. It reuses the helpers added for async discards and
>> inherits the code structure as well as all considerations regarding
>> page cache races.
> 
> Most comments from discard apply here as well.
> 
>> +static int blkdev_queue_cmd(struct io_uring_cmd *cmd, struct block_device *bdev,
>> +			    uint64_t start, uint64_t len, sector_t limit,
>> +			    blk_opf_t opf)
> 
> This feels a little over-generic, as it doesn't queue just any random
> command.
> 
>> +static int blkdev_cmd_write_zeroes(struct io_uring_cmd *cmd,
>> +				   struct block_device *bdev,
>> +				   uint64_t start, uint64_t len, bool nowait)
>> +{
>> +	blk_opf_t opf = REQ_OP_WRITE_ZEROES | REQ_NOUNMAP;
>> +
>> +	if (nowait)
>> +		opf |= REQ_NOWAIT;
>> +	return blkdev_queue_cmd(cmd, bdev, start, len,
>> +				bdev_write_zeroes_sectors(bdev), opf);
> 
> So no support for a fallback to writing zero pages here? That's probably
> the case where the async offload is needed most.

Fair enough, but I believe it's more reasonable to have a separate
cmd for it instead of silently falling back.
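
To make the trade-off under discussion concrete: with the patch as posted,
a device without a write-zeroes offload fails the command with -EOPNOTSUPP
(the !limit check in blkdev_queue_cmd()), so a zero-page fallback would
have to live in the caller. A purely illustrative sketch of such a
fallback, reusing the hypothetical zero_range() helper from the sketch
above and plain buffered writes for simplicity:

#include <unistd.h>
#include <errno.h>

static int zero_range_with_fallback(int bdev_fd, __u64 start, __u64 len)
{
	/* 1 MiB of zeroes, zero-initialized in .bss */
	static const char zbuf[1 << 20];
	int ret = zero_range(bdev_fd, start, len);

	if (ret != -EOPNOTSUPP)
		return ret;

	while (len) {
		size_t chunk = len < sizeof(zbuf) ? len : sizeof(zbuf);
		ssize_t n = pwrite(bdev_fd, zbuf, chunk, start);

		if (n <= 0)
			return n ? -errno : -EIO;
		start += n;
		len -= n;
	}
	return 0;
}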

Patch

diff --git a/block/ioctl.c b/block/ioctl.c
index a9aaa7cb7f73..6f0676f21e7b 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -776,6 +776,71 @@  static void bio_cmd_end(struct bio *bio)
 	bio_put(bio);
 }
 
+static int blkdev_queue_cmd(struct io_uring_cmd *cmd, struct block_device *bdev,
+			    uint64_t start, uint64_t len, sector_t limit,
+			    blk_opf_t opf)
+{
+	sector_t bs_mask = (bdev_logical_block_size(bdev) >> SECTOR_SHIFT) - 1;
+	sector_t sector = start >> SECTOR_SHIFT;
+	sector_t nr_sects = len >> SECTOR_SHIFT;
+	struct bio *prev = NULL, *bio;
+	int err;
+
+	if (!limit)
+		return -EOPNOTSUPP;
+
+	err = blk_validate_write(bdev, file_to_blk_mode(cmd->file), start, len);
+	if (err)
+		return err;
+
+	err = filemap_invalidate_pages(bdev->bd_mapping, start,
+					start + len - 1, opf & REQ_NOWAIT);
+	if (err)
+		return err;
+
+	limit = min(limit, (UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
+	while (nr_sects) {
+		sector_t bio_sects = min(nr_sects, limit);
+
+		/*
+		 * Don't allow multi-bio non-blocking submissions as subsequent
+		 * bios may fail but we won't get direct feedback about that.
+		 * Normally, the caller should retry from a blocking context.
+		 */
+		if ((opf & REQ_NOWAIT) && bio_sects != nr_sects)
+			return -EAGAIN;
+
+		bio = bio_alloc(bdev, 0, opf, GFP_KERNEL);
+		if (!bio)
+			break;
+		bio->bi_iter.bi_sector = sector;
+		bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
+		sector += bio_sects;
+		nr_sects -= bio_sects;
+
+		prev = bio_chain_and_submit(prev, bio);
+	}
+	if (!prev)
+		return -EFAULT;
+
+	prev->bi_private = cmd;
+	prev->bi_end_io = bio_cmd_end;
+	submit_bio(prev);
+	return -EIOCBQUEUED;
+}
+
+static int blkdev_cmd_write_zeroes(struct io_uring_cmd *cmd,
+				   struct block_device *bdev,
+				   uint64_t start, uint64_t len, bool nowait)
+{
+	blk_opf_t opf = REQ_OP_WRITE_ZEROES | REQ_NOUNMAP;
+
+	if (nowait)
+		opf |= REQ_NOWAIT;
+	return blkdev_queue_cmd(cmd, bdev, start, len,
+				bdev_write_zeroes_sectors(bdev), opf);
+}
+
 static int blkdev_cmd_discard(struct io_uring_cmd *cmd,
 			      struct block_device *bdev,
 			      uint64_t start, uint64_t len, bool nowait)
@@ -843,6 +908,9 @@  int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 	switch (cmd_op) {
 	case BLOCK_URING_CMD_DISCARD:
 		return blkdev_cmd_discard(cmd, bdev, start, len, bc->nowait);
+	case BLOCK_URING_CMD_WRITE_ZEROES:
+		return blkdev_cmd_write_zeroes(cmd, bdev, start, len,
+					       bc->nowait);
 	}
 	return -EINVAL;
 }
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e85ec73a07d5..82bbe1e3e278 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1095,6 +1095,10 @@  int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp);
 
+struct bio *blk_alloc_write_zeroes_bio(struct block_device *bdev,
+					sector_t *sector, sector_t *nr_sects,
+					gfp_t gfp_mask);
+
 #define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
 #define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */
 #define BLKDEV_ZERO_KILLABLE	(1 << 2)  /* interruptible by fatal signals */
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 0016e38ed33c..b9e20ce57a28 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -209,6 +209,7 @@  struct fsxattr {
  */
 
 #define BLOCK_URING_CMD_DISCARD			0
+#define BLOCK_URING_CMD_WRITE_ZEROES		1
 
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */