[1/5] brd: fix oops if write concurrent with discard

Message ID 20250418093826.3149293-2-yukuai1@huaweicloud.com (mailing list archive)
State New
Series brd: discard bugfix

Commit Message

Yu Kuai April 18, 2025, 9:38 a.m. UTC
From: Yu Kuai <yukuai3@huawei.com>

Users can issue write and discard concurrently, causing the following BUG_ON:
cpu0:
brd_submit_bio
 brd_do_bvec
  copy_to_brd_setup
   brd_insert_page
    xa_lock
    __xa_insert
    xa_unlock
				cpu1:
				brd_submit_bio
				 brd_do_discard
				  xa_lock
				  page = __xa_erase
				  __free_page
				  xa_unlock
  copy_to_brd
   brd_lookup_page
    page = xa_load
    BUG_ON(!page)
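
For illustration only (not part of this patch): a minimal userspace sketch
that could exercise the race, assuming /dev/ram0 is backed by a brd built
with discard support. It hammers the same region with O_DIRECT writes in
one thread and BLKDISCARD in another; the device path, window size and
thread layout are arbitrary assumptions.

/* Hypothetical reproducer sketch, build with: gcc -O2 -pthread repro.c */
#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* BLKDISCARD */

#define REGION	(1 << 20)	/* arbitrary 1 MiB window to race over */

static int fd;

/* Thread 1: keep writing 4K blocks at random offsets inside the window. */
static void *writer(void *arg)
{
	void *buf;

	if (posix_memalign(&buf, 4096, 4096))
		return NULL;
	memset(buf, 0xaa, 4096);
	for (;;)
		pwrite(fd, buf, 4096, (rand() % REGION) & ~4095);
	return NULL;
}

/* Thread 2: keep discarding the whole window. */
static void *discarder(void *arg)
{
	uint64_t range[2] = { 0, REGION };

	for (;;)
		ioctl(fd, BLKDISCARD, range);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	/* O_DIRECT so writes go straight to brd instead of the page cache. */
	fd = open("/dev/ram0", O_RDWR | O_DIRECT);
	if (fd < 0)
		return 1;
	pthread_create(&t1, NULL, writer, NULL);
	pthread_create(&t2, NULL, discarder, NULL);
	pthread_join(t1, NULL);
	return 0;
}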

Fix this problem by skipping the write; the user will read back zeroes
later if the page is not present.

Also fix the following checkpatch warnings:
WARNING: Deprecated use of 'kmap_atomic', prefer 'kmap_local_page' instead
WARNING: Deprecated use of 'kunmap_atomic', prefer 'kunmap_local' instead

Fixes: 9ead7efc6f3f ("brd: implement discard support")
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 drivers/block/brd.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

Patch

diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 292f127cae0a..a6e4f005cb76 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -133,22 +133,22 @@  static void copy_to_brd(struct brd_device *brd, const void *src,
 
 	copy = min_t(size_t, n, PAGE_SIZE - offset);
 	page = brd_lookup_page(brd, sector);
-	BUG_ON(!page);
-
-	dst = kmap_atomic(page);
-	memcpy(dst + offset, src, copy);
-	kunmap_atomic(dst);
+	if (page) {
+		dst = kmap_local_page(page);
+		memcpy(dst + offset, src, copy);
+		kunmap_local(dst);
+	}
 
 	if (copy < n) {
 		src += copy;
 		sector += copy >> SECTOR_SHIFT;
 		copy = n - copy;
 		page = brd_lookup_page(brd, sector);
-		BUG_ON(!page);
-
-		dst = kmap_atomic(page);
-		memcpy(dst, src, copy);
-		kunmap_atomic(dst);
+		if (page) {
+			dst = kmap_local_page(page);
+			memcpy(dst, src, copy);
+			kunmap_local(dst);
+		}
 	}
 }
 
@@ -166,9 +166,9 @@  static void copy_from_brd(void *dst, struct brd_device *brd,
 	copy = min_t(size_t, n, PAGE_SIZE - offset);
 	page = brd_lookup_page(brd, sector);
 	if (page) {
-		src = kmap_atomic(page);
+		src = kmap_local_page(page);
 		memcpy(dst, src + offset, copy);
-		kunmap_atomic(src);
+		kunmap_local(src);
 	} else
 		memset(dst, 0, copy);
 
@@ -178,9 +178,9 @@  static void copy_from_brd(void *dst, struct brd_device *brd,
 		copy = n - copy;
 		page = brd_lookup_page(brd, sector);
 		if (page) {
-			src = kmap_atomic(page);
+			src = kmap_local_page(page);
 			memcpy(dst, src, copy);
-			kunmap_atomic(src);
+			kunmap_local(src);
 		} else
 			memset(dst, 0, copy);
 	}
@@ -208,7 +208,7 @@  static int brd_do_bvec(struct brd_device *brd, struct page *page,
 			goto out;
 	}
 
-	mem = kmap_atomic(page);
+	mem = kmap_local_page(page);
 	if (!op_is_write(opf)) {
 		copy_from_brd(mem + off, brd, sector, len);
 		flush_dcache_page(page);
@@ -216,7 +216,7 @@  static int brd_do_bvec(struct brd_device *brd, struct page *page,
 		flush_dcache_page(page);
 		copy_to_brd(brd, mem + off, sector, len);
 	}
-	kunmap_atomic(mem);
+	kunmap_local(mem);
 
 out:
 	return err;