
[5/5] brd: zero data for discard that is not aligned to page

Message ID: 20250418093826.3149293-6-yukuai1@huaweicloud.com
State: New
Series: brd: discard bugfix

Commit Message

Yu Kuai April 18, 2025, 9:38 a.m. UTC
From: Yu Kuai <yukuai3@huawei.com>

Currently brd_do_discard() simply skips sectors that are not aligned to
a page, so the user will still read back old data after a discard. Fix
this by writing zeroes to the unaligned sectors instead of skipping
them.

Fixes: 9ead7efc6f3f ("brd: implement discard support")
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 drivers/block/brd.c | 26 ++++++++++++++++++++++++--
 1 file changed, 24 insertions(+), 2 deletions(-)
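
As an illustration only (not part of the patch), the following
user-space sketch walks through the alignment arithmetic that
brd_do_discard() relies on, assuming 512-byte sectors and 4 KiB pages
(PAGE_SECTORS == 8); the round_up()/round_down() macros below are
simplified stand-ins for the kernel helpers, and all values are made up.

#include <stdio.h>

#define SECTOR_SHIFT	9
#define PAGE_SECTORS	8ULL

#define round_up(x, y)		((((x) + (y) - 1) / (y)) * (y))
#define round_down(x, y)	(((x) / (y)) * (y))

int main(void)
{
	/* Discard 18 sectors starting at sector 3, i.e. the range [3, 21). */
	unsigned long long sector = 3, size = 18 << SECTOR_SHIFT;
	unsigned long long sector_end = sector + (size >> SECTOR_SHIFT);
	unsigned long long aligned_sector = round_up(sector, PAGE_SECTORS);
	unsigned long long aligned_end = round_down(sector_end, PAGE_SECTORS);

	/*
	 * Pages fully covered by [aligned_sector, aligned_end) can simply be
	 * freed.  The partial head [sector, aligned_sector) and tail
	 * [aligned_end, sector_end) were skipped before this patch and are
	 * now zeroed instead.
	 */
	printf("zero head  [%llu, %llu)\n", sector, aligned_sector);
	printf("free pages [%llu, %llu)\n", aligned_sector, aligned_end);
	printf("zero tail  [%llu, %llu)\n", aligned_end, sector_end);
	return 0;
}

For these example values the head is sectors [3, 8), the freed pages
cover sectors [8, 16), and the tail is sectors [16, 21).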

Patch

diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index bf1e1b2a0d28..b5908703fb4b 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -224,6 +224,21 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
 	return err;
 }
 
+static void brd_zero_range(struct brd_device *brd, sector_t sector, u32 size)
+{
+	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
+	struct page *page;
+	void *dst;
+
+	page = brd_lookup_page(brd, sector);
+	if (!page)
+		return;
+
+	dst = kmap_local_page(page);
+	memset(dst + offset, 0, size);
+	kunmap_local(dst);
+}
+
 static void brd_free_one_page(struct rcu_head *head)
 {
 	struct page *page = container_of(head, struct page, rcu_head);
@@ -234,10 +249,17 @@ static void brd_free_one_page(struct rcu_head *head)
 static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
 {
 	sector_t aligned_sector = round_up(sector, PAGE_SECTORS);
-	sector_t aligned_end = round_down(sector + (size >> SECTOR_SHIFT),
-					  PAGE_SECTORS);
+	sector_t sector_end = sector + (size >> SECTOR_SHIFT);
+	sector_t aligned_end = round_down(sector_end, PAGE_SECTORS);
 	struct page *page;
 
+	if (aligned_sector > sector)
+		brd_zero_range(brd, sector,
+			       (aligned_sector - sector) << SECTOR_SHIFT);
+	if (aligned_end < sector_end)
+		brd_zero_range(brd, aligned_end,
+			       (sector_end - aligned_end) << SECTOR_SHIFT);
+
 	if (aligned_end <= aligned_sector)
 		return;