[1/4] block: factor out a helper to set logical/physical block size

Message ID 20250304121918.3159388-2-linan666@huaweicloud.com
State New
Series bugfix of logical_block_size and make it configurable

Checks

Context                             Check    Description
shin/vmtest-linus-master-PR         success  PR summary
shin/vmtest-linus-master-VM_Test-0  success  Logs for build-kernel

Commit Message

Li Nan March 4, 2025, 12:19 p.m. UTC
From: Li Nan <linan122@huawei.com>

Factor out the logical/physical block size handling in blk_stack_limits()
into a new helper, blk_set_block_size(). There is no functional change.

Signed-off-by: Li Nan <linan122@huawei.com>
---
 include/linux/blkdev.h |  2 +
 block/blk-settings.c   | 85 ++++++++++++++++++++++++------------------
 2 files changed, 50 insertions(+), 37 deletions(-)
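
As background, a standalone sketch (not part of the patch): the checks the
new helper consolidates rely on block sizes being powers of two, so
divisibility can be tested with a mask rather than a modulo, i.e.
"x & (size - 1)" is zero exactly when x is a multiple of size. A minimal
illustration:

#include <stdio.h>

/* Divisibility test for a power-of-two size, as in the helper's checks. */
static int is_multiple_of(unsigned int x, unsigned int pow2_size)
{
	return (x & (pow2_size - 1)) == 0;
}

int main(void)
{
	printf("%d\n", is_multiple_of(4096, 512));	/* 1: 4096 = 8 * 512 */
	printf("%d\n", is_multiple_of(4100, 512));	/* 0: misaligned */
	return 0;
}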

Comments

Bart Van Assche March 4, 2025, 2:32 p.m. UTC | #1
On 3/4/25 4:19 AM, linan666@huaweicloud.com wrote:
> +EXPORT_SYMBOL(blk_set_block_size);

This function is exported without any documentation of the requirements
for calling it? Yikes.

Is my understanding correct that it is only safe to apply changes made 
with blk_set_block_size() by calling
queue_limits_commit_update_frozen()?

Thanks,

Bart.
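
A sketch of the sequence Bart describes, assuming the queue_limits update
API; example_update_lbs() is a hypothetical caller, not from this series.
Limits are staged with queue_limits_start_update(), adjusted, and only take
effect via queue_limits_commit_update_frozen(), which freezes the queue
around the commit:

#include <linux/blkdev.h>

/* Hypothetical caller, for illustration only. */
static int example_update_lbs(struct request_queue *q)
{
	struct queue_limits lim = queue_limits_start_update(q);
	int ret;

	/* Raise the staged limits to at least 4K logical/physical. */
	ret = blk_set_block_size(&lim, 4096, 4096);
	if (ret)
		pr_warn("block size limits left misaligned\n");

	/* Nothing is applied to the queue until this frozen commit. */
	return queue_limits_commit_update_frozen(q, &lim);
}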

Patch

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 248416ecd01c..516a7a8c0c3d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -996,6 +996,8 @@  static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
  */
 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
 extern void blk_set_stacking_limits(struct queue_limits *lim);
+extern int blk_set_block_size(struct queue_limits *t, unsigned int logical_block_size,
+		unsigned int physical_block_size);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
 void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c44dadc35e1e..4a053c3d7c0a 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -645,6 +645,53 @@  static void blk_stack_atomic_writes_limits(struct queue_limits *t,
 	t->atomic_write_hw_boundary = 0;
 }
 
+int blk_set_block_size(struct queue_limits *t, unsigned int logical_block_size,
+		     unsigned int physical_block_size)
+{
+	int ret = 0;
+
+	t->logical_block_size = max(t->logical_block_size,
+				    logical_block_size);
+
+	t->physical_block_size = max(t->physical_block_size,
+				     physical_block_size);
+
+	/* Physical block size a multiple of the logical block size? */
+	if (t->physical_block_size & (t->logical_block_size - 1)) {
+		t->physical_block_size = t->logical_block_size;
+		t->flags |= BLK_FLAG_MISALIGNED;
+		ret = -1;
+	}
+
+	/* Minimum I/O a multiple of the physical block size? */
+	if (t->io_min & (t->physical_block_size - 1)) {
+		t->io_min = t->physical_block_size;
+		t->flags |= BLK_FLAG_MISALIGNED;
+		ret = -1;
+	}
+
+	/* Optimal I/O a multiple of the physical block size? */
+	if (t->io_opt & (t->physical_block_size - 1)) {
+		t->io_opt = 0;
+		t->flags |= BLK_FLAG_MISALIGNED;
+		ret = -1;
+	}
+
+	/* chunk_sectors a multiple of the physical block size? */
+	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
+		t->chunk_sectors = 0;
+		t->flags |= BLK_FLAG_MISALIGNED;
+		ret = -1;
+	}
+
+	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
+	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
+	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
+
+	return ret;
+}
+EXPORT_SYMBOL(blk_set_block_size);
+
 /**
  * blk_stack_limits - adjust queue_limits for stacked devices
  * @t:	the stacking driver limits (top device)
@@ -728,12 +775,6 @@  int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		}
 	}
 
-	t->logical_block_size = max(t->logical_block_size,
-				    b->logical_block_size);
-
-	t->physical_block_size = max(t->physical_block_size,
-				     b->physical_block_size);
-
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
@@ -742,33 +783,7 @@  int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	if (b->chunk_sectors)
 		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
 
-	/* Physical block size a multiple of the logical block size? */
-	if (t->physical_block_size & (t->logical_block_size - 1)) {
-		t->physical_block_size = t->logical_block_size;
-		t->flags |= BLK_FLAG_MISALIGNED;
-		ret = -1;
-	}
-
-	/* Minimum I/O a multiple of the physical block size? */
-	if (t->io_min & (t->physical_block_size - 1)) {
-		t->io_min = t->physical_block_size;
-		t->flags |= BLK_FLAG_MISALIGNED;
-		ret = -1;
-	}
-
-	/* Optimal I/O a multiple of the physical block size? */
-	if (t->io_opt & (t->physical_block_size - 1)) {
-		t->io_opt = 0;
-		t->flags |= BLK_FLAG_MISALIGNED;
-		ret = -1;
-	}
-
-	/* chunk_sectors a multiple of the physical block size? */
-	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
-		t->chunk_sectors = 0;
-		t->flags |= BLK_FLAG_MISALIGNED;
-		ret = -1;
-	}
+	ret = blk_set_block_size(t, b->logical_block_size, b->physical_block_size);
 
 	/* Find lowest common alignment_offset */
 	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
@@ -780,10 +795,6 @@  int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		ret = -1;
 	}
 
-	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
-	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
-	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
-
 	/* Discard alignment and granularity */
 	if (b->discard_granularity) {
 		alignment = queue_limit_discard_alignment(b, start);