diff --git a/block/blk-lib.c b/block/blk-lib.c
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -98,6 +98,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, op, 0);
+ bio_set_prio(bio, get_current_ioprio());
bio->bi_iter.bi_size = req_sects << 9;
sector += req_sects;
@@ -191,6 +192,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
bio = blk_next_bio(bio, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
+ bio_set_prio(bio, get_current_ioprio());
bio->bi_vcnt = 1;
bio->bi_io_vec->bv_page = page;
bio->bi_io_vec->bv_offset = 0;
@@ -267,6 +269,7 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
+ bio_set_prio(bio, get_current_ioprio());
bio->bi_opf = REQ_OP_WRITE_ZEROES;
if (flags & BLKDEV_ZERO_NOUNMAP)
bio->bi_opf |= REQ_NOUNMAP;
@@ -319,6 +322,7 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
+ bio_set_prio(bio, get_current_ioprio());
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
while (nr_sects != 0) {
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -233,6 +233,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
while (sector < end_sector) {
bio = blk_next_bio(bio, 0, gfp_mask);
bio_set_dev(bio, bdev);
+ bio_set_prio(bio, get_current_ioprio());
/*
* Special case for the zone reset operation that reset all
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -84,6 +84,10 @@ enum {
NULL_Q_MQ = 2,
};
+static bool g_discard;
+module_param_named(discard, g_discard, bool, 0444);
+MODULE_PARM_DESC(discard, "Enable queue discard (default: false)");
+
static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");
@@ -156,6 +160,10 @@ static int g_max_sectors;
module_param_named(max_sectors, g_max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");
+static unsigned int g_bounce_pfn;
+module_param_named(bounce_pfn, g_bounce_pfn, uint, 0444);
+MODULE_PARM_DESC(bounce_pfn, "Queue bounce limit (default: 0)");
+
static unsigned int nr_devices = 1;
module_param(nr_devices, uint, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
@@ -350,6 +358,7 @@ NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
NULLB_DEVICE_ATTR(home_node, uint, NULL);
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
+NULLB_DEVICE_ATTR(bounce_pfn, uint, NULL);
NULLB_DEVICE_ATTR(max_sectors, uint, NULL);
NULLB_DEVICE_ATTR(irqmode, uint, NULL);
NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
@@ -468,6 +477,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_home_node,
&nullb_device_attr_queue_mode,
&nullb_device_attr_blocksize,
+ &nullb_device_attr_bounce_pfn,
&nullb_device_attr_max_sectors,
&nullb_device_attr_irqmode,
&nullb_device_attr_hw_queue_depth,
@@ -539,7 +549,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE,
- "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active,blocksize,max_sectors\n");
+ "memory_backed,discard,bounce_pfn,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -600,6 +610,7 @@ static struct nullb_device *null_alloc_dev(void)
dev->blocking = g_blocking;
dev->use_per_node_hctx = g_use_per_node_hctx;
dev->zoned = g_zoned;
+ dev->discard = g_discard;
dev->zone_size = g_zone_size;
dev->zone_capacity = g_zone_capacity;
dev->zone_nr_conv = g_zone_nr_conv;
@@ -1588,15 +1599,10 @@ static void null_del_dev(struct nullb *nullb)
static void null_config_discard(struct nullb *nullb)
{
+ blk_queue_max_write_zeroes_sectors(nullb->q, UINT_MAX >> 9);
if (nullb->dev->discard == false)
return;
- if (!nullb->dev->memory_backed) {
- nullb->dev->discard = false;
- pr_info("discard option is ignored without memory backing\n");
- return;
- }
-
if (nullb->dev->zoned) {
nullb->dev->discard = false;
pr_info("discard option is ignored in zoned mode\n");
@@ -1609,6 +1615,17 @@ static void null_config_discard(struct nullb *nullb)
blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}
+static void null_config_bounce_pfn(struct nullb *nullb)
+{
+ if (nullb->dev->memory_backed && !nullb->dev->bounce_pfn)
+ return;
+
+ if (!nullb->dev->memory_backed && !g_bounce_pfn)
+ return;
+
+ blk_queue_bounce_limit(nullb->q, nullb->dev->bounce_pfn);
+}
+
static const struct block_device_operations null_bio_ops = {
.owner = THIS_MODULE,
.submit_bio = null_submit_bio,
@@ -1881,6 +1898,7 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
null_config_discard(nullb);
+ null_config_bounce_pfn(nullb);
sprintf(nullb->disk_name, "nullb%d", nullb->index);
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -86,6 +86,7 @@ struct nullb_device {
unsigned int queue_mode; /* block interface */
unsigned int blocksize; /* block size */
unsigned int max_sectors; /* Max sectors per command */
+ unsigned int bounce_pfn; /* bounce page frame number */
unsigned int irqmode; /* IRQ completion handler */
unsigned int hw_queue_depth; /* queue depth */
unsigned int index; /* index of the disk, only valid with a disk */
Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
---
 block/blk-lib.c                   |  4 ++++
 block/blk-zoned.c                 |  1 +
 drivers/block/null_blk/main.c     | 32 ++++++++++++++++++++++++-------
 drivers/block/null_blk/null_blk.h |  1 +
 4 files changed, 31 insertions(+), 7 deletions(-)
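
Note (not part of the patch): the bios built by these blk-lib.c and
blk-zoned.c helpers previously went out with bi_ioprio left at zero, so
they were always treated as no-priority I/O regardless of the submitting
task. The hunks above only call two small helpers; as a reference sketch,
assuming the ~v5.x definitions this series appears to target, they look
roughly like this:

	/* include/linux/bio.h: record an I/O priority in the bio */
	static inline void bio_set_prio(struct bio *bio, unsigned short prio)
	{
		bio->bi_ioprio = prio;
	}

	/* include/linux/ioprio.h: the calling task's ioprio, or the
	 * IOPRIO_CLASS_NONE default when no io_context has been set up.
	 */
	static inline int get_current_ioprio(void)
	{
		struct io_context *ioc = current->io_context;

		if (ioc)
			return ioc->ioprio;
		return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
	}

With the bio_set_prio() calls in place, discard, write-same, write-zeroes,
zero-pages and zone-management bios inherit whatever priority the caller
configured (e.g. via ioprio_set()/ionice) instead of defaulting to none.
On the null_blk side, discard becomes a module parameter/configfs knob
that no longer requires memory backing, and the new bounce_pfn knob is fed
to blk_queue_bounce_limit() when configured.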