@@ -293,26 +293,36 @@ static sector_t bio_secure_erase_limit(struct block_device *bdev)
(UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
}

+struct bio *blk_alloc_secure_erase_bio(struct block_device *bdev,
+ sector_t *sector, sector_t *nr_sects, gfp_t gfp)
+{
+ sector_t bio_sects = min(*nr_sects, bio_secure_erase_limit(bdev));
+ struct bio *bio;
+
+ if (!bio_sects)
+ return NULL;
+ bio = bio_alloc(bdev, 0, REQ_OP_SECURE_ERASE, gfp);
+ if (!bio)
+ return NULL;
+ bio->bi_iter.bi_sector = *sector;
+ bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
+ *sector += bio_sects;
+ *nr_sects -= bio_sects;
+ cond_resched();
+ return bio;
+}
+
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp)
{
- unsigned int max_sectors = bio_secure_erase_limit(bdev);
- struct bio *bio = NULL;
+ struct bio *bio = NULL, *next;
struct blk_plug plug;
int ret = 0;

blk_start_plug(&plug);
- while (nr_sects) {
- unsigned int len = min_t(sector_t, nr_sects, max_sectors);
-
- bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
- bio->bi_iter.bi_sector = sector;
- bio->bi_iter.bi_size = len << SECTOR_SHIFT;
-
- sector += len;
- nr_sects -= len;
- cond_resched();
- }
+ while ((next = blk_alloc_secure_erase_bio(bdev, &sector, &nr_sects,
+ gfp)))
+ bio = bio_chain_and_submit(bio, next);
if (bio) {
ret = submit_bio_wait(bio);
bio_put(bio);
@@ -835,5 +835,7 @@ struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);

struct bio *blk_alloc_discard_bio(struct block_device *bdev,
sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask);
+struct bio *blk_alloc_secure_erase_bio(struct block_device *bdev,
+ sector_t *sector, sector_t *nr_sects, gfp_t gfp);

#endif /* __LINUX_BIO_H */
Factor out a helper from blkdev_issue_secure_erase that chews off as much
as possible from a secure_erase range and allocates a bio for it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-lib.c     | 36 +++++++++++++++++++++++-------------
 include/linux/bio.h |  2 ++
 2 files changed, 25 insertions(+), 13 deletions(-)
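For illustration only, not part of the patch: a minimal sketch of how a
caller could drive the new helper, mirroring the converted loop in
blkdev_issue_secure_erase above. The function name
example_secure_erase_range is hypothetical and plugging is omitted;
blk_alloc_secure_erase_bio, bio_chain_and_submit, submit_bio_wait and
bio_put are the interfaces the patch itself uses.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical caller: securely erase [sector, sector + nr_sects). */
static int example_secure_erase_range(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp)
{
	struct bio *bio = NULL, *next;
	int ret = 0;

	/*
	 * Each call chews off at most bio_secure_erase_limit(bdev) sectors,
	 * advances the caller's sector and nr_sects, and returns NULL once
	 * the range has been fully consumed (or if allocation fails).
	 */
	while ((next = blk_alloc_secure_erase_bio(bdev, &sector, &nr_sects,
			gfp)))
		bio = bio_chain_and_submit(bio, next);

	/* Wait on the tail of the chain, which completes last. */
	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	return ret;
}

Chaining and submitting each chunk as it is allocated keeps only one bio
pointer live at a time, while submit_bio_wait only has to wait on the final
bio in the chain.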