
[2/5] brd: synchronize using page and free page with rcu

Message ID 20250418093826.3149293-3-yukuai1@huaweicloud.com (mailing list archive)
State New
Series brd: discard bugfix

Commit Message

Yu Kuai April 18, 2025, 9:38 a.m. UTC
From: Yu Kuai <yukuai3@huawei.com>

Currently, after the page is fetched by xa_load() in the IO path, there is
no protection, and the page can be freed concurrently by discard:

cpu0
brd_submit_bio
 brd_do_bvec
  page = brd_lookup_page
                          cpu1
                          brd_submit_bio
                           brd_do_discard
                            page = __xa_erase()
                            __free_page()
  // page UAF

Fix the problem by protecting the page with RCU.

Fixes: 9ead7efc6f3f ("brd: implement discard support")
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 drivers/block/brd.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)
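
For context, the fix applies the standard RCU deferred-free pattern: the
updater first unpublishes the object (here, __xa_erase() removes the page
from the xarray), then hands the actual free to call_rcu(), which invokes
the callback only after every reader that might still hold a reference has
left its read-side critical section. A minimal self-contained sketch of
that pattern follows; the names (struct item, reader(), remove_item()) are
generic illustrations, not brd code:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	int data;
	struct rcu_head rcu;
};

static struct item __rcu *shared;

/* Reader: the object cannot be freed while inside the critical section. */
static int reader(void)
{
	struct item *it;
	int val = -1;

	rcu_read_lock();
	it = rcu_dereference(shared);
	if (it)
		val = it->data;
	rcu_read_unlock();

	return val;
}

static void item_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu));
}

/* Updater: unpublish first, then defer the free past all current readers. */
static void remove_item(void)
{
	struct item *it = rcu_replace_pointer(shared, NULL, true);

	if (it)
		call_rcu(&it->rcu, item_free_rcu);
}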

Patch

diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index a6e4f005cb76..740ed13faaff 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -208,6 +208,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
 			goto out;
 	}
 
+	rcu_read_lock();
 	mem = kmap_local_page(page);
 	if (!op_is_write(opf)) {
 		copy_from_brd(mem + off, brd, sector, len);
@@ -217,11 +218,19 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
 		copy_to_brd(brd, mem + off, sector, len);
 	}
 	kunmap_local(mem);
+	rcu_read_unlock();
 
 out:
 	return err;
 }
 
+static void brd_free_one_page(struct rcu_head *head)
+{
+	struct page *page = container_of(head, struct page, rcu_head);
+
+	__free_page(page);
+}
+
 static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
 {
 	sector_t aligned_sector = (sector + PAGE_SECTORS) & ~PAGE_SECTORS;
@@ -232,7 +241,7 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
 	while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) {
 		page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
 		if (page) {
-			__free_page(page);
+			call_rcu(&page->rcu_head, brd_free_one_page);
 			brd->brd_nr_pages--;
 		}
 		aligned_sector += PAGE_SECTORS;
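
Note the placement of the read-side section in the first hunk: it spans
both the page lookup (brd_lookup_page() is called from copy_from_brd()/
copy_to_brd(), per the race diagram in the commit message) and the
kmap/copy of the page, and nothing inside it sleeps, which is what makes
the deferred __free_page() safe. A hedged reader-side sketch of the same
shape; read_sector() is a hypothetical helper, not the actual brd copy
path:

#include <linux/xarray.h>
#include <linux/highmem.h>
#include <linux/rcupdate.h>
#include <linux/string.h>

static void read_sector(struct xarray *pages, pgoff_t idx,
			void *dst, size_t off, size_t len)
{
	struct page *page;
	void *src;

	rcu_read_lock();
	page = xa_load(pages, idx);		/* lookup ...		      */
	if (page) {
		src = kmap_local_page(page);	/* ... and use, in one section */
		memcpy(dst, src + off, len);
		kunmap_local(src);
	} else {
		memset(dst, 0, len);		/* absent page reads as zeroes */
	}
	rcu_read_unlock();
}

Because struct page already embeds an rcu_head, the updater side in the
patch needs no extra allocation for the call_rcu() callback.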