Since dm inherits limits from underlying block devices, this patch
directly disables max_allocate_sectors for dm until full allocation
support is implemented. This prevents high-level primitives
(generic_make_request_checks(), __blkdev_issue_write_zeroes(), ...)
from sending REQ_ALLOCATE requests.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 drivers/md/dm-table.c | 2 ++
 drivers/md/md.h       | 1 +
 2 files changed, 3 insertions(+)

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -489,6 +489,7 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 		       (unsigned long long) start << SECTOR_SHIFT);
 
 	limits->zoned = blk_queue_zoned_model(q);
+	blk_queue_max_allocate_sectors(q, 0);
 
 	return 0;
 }
@@ -1548,6 +1549,7 @@ int dm_calculate_queue_limits(struct dm_table *table,
 			       dm_device_name(table->md),
 			       (unsigned long long) ti->begin,
 			       (unsigned long long) ti->len);
+		limits->max_allocate_sectors = 0;
 
 		/*
 		 * FIXME: this should likely be moved to blk_stack_limits(), would
diff --git a/drivers/md/md.h b/drivers/md/md.h
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -798,5 +798,6 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio
 	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
 	    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
 		mddev->queue->limits.max_write_zeroes_sectors = 0;
+	blk_queue_max_allocate_sectors(mddev->queue, 0);
 }
 #endif /* _MD_MD_H */
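
A note for anyone reading this out of context: blk_queue_max_allocate_sectors()
is not a mainline helper; it is introduced earlier in this series. A minimal
sketch of what it is assumed to do, modeled on the mainline
blk_queue_max_write_zeroes_sectors(); the name and the max_allocate_sectors
field come from the series, but the body below is an illustration, not the
series' exact code:

	/*
	 * Hypothetical sketch: record the REQ_ALLOCATE cap in the queue
	 * limits, the same way blk_queue_max_write_zeroes_sectors()
	 * records the REQ_OP_WRITE_ZEROES cap. A value of 0 advertises
	 * "no allocation support" for this queue.
	 */
	void blk_queue_max_allocate_sectors(struct request_queue *q,
					    unsigned int max_allocate_sectors)
	{
		q->limits.max_allocate_sectors = max_allocate_sectors;
	}

Both dm-table.c hunks rely on 0 meaning "unsupported": the limit is cleared
on each underlying device's queue as dm iterates over them, and again on the
stacked result in dm_calculate_queue_limits(), so it stays 0 regardless of
what the underlying devices report. The md.h hunk does the same for md.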
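
As for why zeroing the limit is enough to keep REQ_ALLOCATE away from the
targets: generic_make_request_checks() fails bios whose operation the queue
does not advertise, completing them with BLK_STS_NOTSUPP at its
not_supported label. Mainline already does this for REQ_OP_WRITE_ZEROES,
and the series presumably adds an analogous case for REQ_OP_ALLOCATE. A
sketch of the assumed check (REQ_OP_ALLOCATE and max_allocate_sectors are
from the series; the surrounding pattern is the existing mainline one):

	switch (bio_op(bio)) {
	case REQ_OP_WRITE_ZEROES:
		/* Existing mainline check that this scheme mirrors. */
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	case REQ_OP_ALLOCATE:
		/* Assumed check from the series: limit 0 == unsupported. */
		if (!q->limits.max_allocate_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

With the limit pinned to 0, callers such as __blkdev_issue_write_zeroes()
see no capacity to issue against and the request path rejects any stray
allocate bio, so no REQ_ALLOCATE request ever reaches a dm (or md) device.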