Message ID | 20230327084103.21601-8-anuj20.g@samsung.com (mailing list archive)
---|---
State | New, archived
Series | [v8,1/9] block: Introduce queue limits for copy-offload support
On 3/27/23 17:40, Anuj Gupta wrote:
> From: Nitesh Shetty <nj.shetty@samsung.com>

Drop the period at the end of the patch title.

> Before enabling copy for dm target, check if underlying devices and
> dm target support copy. Avoid split happening inside dm target.
> Fail early if the request needs split, currently splitting copy
> request is not supported.
>
> Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
> ---
>  drivers/md/dm-table.c         | 42 +++++++++++++++++++++++++++++++++++
>  drivers/md/dm.c               |  7 ++++++
>  include/linux/device-mapper.h |  5 +++++
>  3 files changed, 54 insertions(+)
>
> diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
> index 7899f5fb4c13..45e894b9e3be 100644
> --- a/drivers/md/dm-table.c
> +++ b/drivers/md/dm-table.c
> @@ -1863,6 +1863,39 @@ static bool dm_table_supports_nowait(struct dm_table *t)
>  	return true;
>  }
>
> +static int device_not_copy_capable(struct dm_target *ti, struct dm_dev *dev,
> +				   sector_t start, sector_t len, void *data)
> +{
> +	struct request_queue *q = bdev_get_queue(dev->bdev);
> +
> +	return !blk_queue_copy(q);
> +}
> +
> +static bool dm_table_supports_copy(struct dm_table *t)
> +{
> +	struct dm_target *ti;
> +	unsigned int i;
> +
> +	for (i = 0; i < t->num_targets; i++) {
> +		ti = dm_table_get_target(t, i);
> +
> +		if (!ti->copy_offload_supported)
> +			return false;
> +
> +		/*
> +		 * target provides copy support (as implied by setting
> +		 * 'copy_offload_supported')
> +		 * and it relies on _all_ data devices having copy support.
> +		 */
> +		if (!ti->type->iterate_devices ||
> +		    ti->type->iterate_devices(ti,
> +					      device_not_copy_capable, NULL))
> +			return false;
> +	}
> +
> +	return true;
> +}
> +
>  static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
>  				      sector_t start, sector_t len, void *data)
>  {
> @@ -1945,6 +1978,15 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
>  		q->limits.discard_misaligned = 0;
>  	}
>
> +	if (!dm_table_supports_copy(t)) {
> +		blk_queue_flag_clear(QUEUE_FLAG_COPY, q);
> +		/* Must also clear copy limits... */

Not a useful comment. The code is clear.

> +		q->limits.max_copy_sectors = 0;
> +		q->limits.max_copy_sectors_hw = 0;
> +	} else {
> +		blk_queue_flag_set(QUEUE_FLAG_COPY, q);
> +	}
> +
>  	if (!dm_table_supports_secure_erase(t))
>  		q->limits.max_secure_erase_sectors = 0;
>
> diff --git a/drivers/md/dm.c b/drivers/md/dm.c
> index 2d0f934ba6e6..08ec51000af8 100644
> --- a/drivers/md/dm.c
> +++ b/drivers/md/dm.c
> @@ -1693,6 +1693,13 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
>  	if (unlikely(ci->is_abnormal_io))
>  		return __process_abnormal_io(ci, ti);
>
> +	if ((unlikely(op_is_copy(ci->bio->bi_opf)) &&
> +	     max_io_len(ti, ci->sector) < ci->sector_count)) {
> +		DMERR("Error, IO size(%u) > max target size(%llu)\n",
> +		      ci->sector_count, max_io_len(ti, ci->sector));
> +		return BLK_STS_IOERR;
> +	}
> +
>  	/*
>  	 * Only support bio polling for normal IO, and the target io is
>  	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
> diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
> index 7975483816e4..44969a20295e 100644
> --- a/include/linux/device-mapper.h
> +++ b/include/linux/device-mapper.h
> @@ -380,6 +380,11 @@ struct dm_target {
>  	 * bio_set_dev(). NOTE: ideally a target should _not_ need this.
>  	 */
>  	bool needs_bio_set_dev:1;
> +
> +	/*
> +	 * copy offload is supported
> +	 */
> +	bool copy_offload_supported:1;
>  };
>
>  void *dm_per_bio_data(struct bio *bio, size_t data_size);

-- 
Damien Le Moal
Western Digital Research
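For context on the hunks under review: the per-target `copy_offload_supported` bit is what `dm_table_supports_copy()` cross-checks against every underlying device. A minimal sketch of how a pass-through target might opt in from its constructor follows; the target and its ctr are assumptions for illustration, not part of this series.

```c
/*
 * Hypothetical illustration only, not from this series: a pass-through
 * target advertising copy offload from its constructor. Even with this
 * bit set, dm_table_supports_copy() still requires QUEUE_FLAG_COPY on
 * every underlying data device before the dm queue gets the flag.
 */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* ... parse arguments, dm_get_device(), set up private data ... */

	ti->copy_offload_supported = 1;	/* bitfield added by this patch */
	return 0;
}
```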
On Wed, Mar 29, 2023 at 05:59:49PM +0900, Damien Le Moal wrote:
> On 3/27/23 17:40, Anuj Gupta wrote:
> > From: Nitesh Shetty <nj.shetty@samsung.com>
>
> Drop the period at the end of the patch title.

Acked

[...]

> > +	if (!dm_table_supports_copy(t)) {
> > +		blk_queue_flag_clear(QUEUE_FLAG_COPY, q);
> > +		/* Must also clear copy limits... */
>
> Not a useful comment. The code is clear.

Acked

[...]

> --
> Damien Le Moal
> Western Digital Research

Thank you,
Nitesh Shetty
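On the no-split rule acked above: `__split_and_process_bio()` now fails a copy early instead of splitting it. The same condition, restated in isolation as a hedged sketch (the helper name is hypothetical; `op_is_copy()` is introduced by this series, `max_io_len()` is dm core):

```c
/*
 * Hypothetical restatement of the added check: a copy bio must fit
 * entirely within the room the first mapped target has left at
 * ci->sector, because splitting a copy request is not supported yet.
 */
static inline bool dm_copy_would_split(struct dm_target *ti,
				       struct clone_info *ci)
{
	return op_is_copy(ci->bio->bi_opf) &&
	       max_io_len(ti, ci->sector) < ci->sector_count;
}
```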
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 7899f5fb4c13..45e894b9e3be 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1863,6 +1863,39 @@ static bool dm_table_supports_nowait(struct dm_table *t)
 	return true;
 }
 
+static int device_not_copy_capable(struct dm_target *ti, struct dm_dev *dev,
+				   sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return !blk_queue_copy(q);
+}
+
+static bool dm_table_supports_copy(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned int i;
+
+	for (i = 0; i < t->num_targets; i++) {
+		ti = dm_table_get_target(t, i);
+
+		if (!ti->copy_offload_supported)
+			return false;
+
+		/*
+		 * target provides copy support (as implied by setting
+		 * 'copy_offload_supported')
+		 * and it relies on _all_ data devices having copy support.
+		 */
+		if (!ti->type->iterate_devices ||
+		    ti->type->iterate_devices(ti,
+					      device_not_copy_capable, NULL))
+			return false;
+	}
+
+	return true;
+}
+
 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
 				      sector_t start, sector_t len, void *data)
 {
@@ -1945,6 +1978,15 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		q->limits.discard_misaligned = 0;
 	}
 
+	if (!dm_table_supports_copy(t)) {
+		blk_queue_flag_clear(QUEUE_FLAG_COPY, q);
+		/* Must also clear copy limits... */
+		q->limits.max_copy_sectors = 0;
+		q->limits.max_copy_sectors_hw = 0;
+	} else {
+		blk_queue_flag_set(QUEUE_FLAG_COPY, q);
+	}
+
 	if (!dm_table_supports_secure_erase(t))
 		q->limits.max_secure_erase_sectors = 0;
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2d0f934ba6e6..08ec51000af8 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1693,6 +1693,13 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
 	if (unlikely(ci->is_abnormal_io))
 		return __process_abnormal_io(ci, ti);
 
+	if ((unlikely(op_is_copy(ci->bio->bi_opf)) &&
+	     max_io_len(ti, ci->sector) < ci->sector_count)) {
+		DMERR("Error, IO size(%u) > max target size(%llu)\n",
+		      ci->sector_count, max_io_len(ti, ci->sector));
+		return BLK_STS_IOERR;
+	}
+
 	/*
 	 * Only support bio polling for normal IO, and the target io is
 	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 7975483816e4..44969a20295e 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -380,6 +380,11 @@ struct dm_target {
 	 * bio_set_dev(). NOTE: ideally a target should _not_ need this.
 	 */
 	bool needs_bio_set_dev:1;
+
+	/*
+	 * copy offload is supported
+	 */
+	bool copy_offload_supported:1;
 };
 
 void *dm_per_bio_data(struct bio *bio, size_t data_size);
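Putting the pieces together: after `dm_table_set_restrictions()` runs, a submitter sitting on top of the dm device sees either a usable copy path or a cleared flag and zeroed limits. A small hypothetical helper sketching such a gate (not part of the series; `blk_queue_copy()` and `max_copy_sectors` come from patch 1/9):

```c
/*
 * Illustrative sketch, not from this series: gate a copy submission on
 * the queue state stacked above. If any target or underlying device
 * lacks copy support, QUEUE_FLAG_COPY is clear and max_copy_sectors is
 * 0, so this returns false.
 */
static bool bdev_copy_capable(struct block_device *bdev, sector_t nr_sects)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return blk_queue_copy(q) && nr_sects <= q->limits.max_copy_sectors;
}
```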