diff --git a/block/blk-core.c b/block/blk-core.c
@@ -1641,7 +1641,7 @@ generic_make_request_checks(struct bio *bio)
goto end_io;
}
- if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+ if (unlikely(!(bio->bi_rw & (REQ_DISCARD | REQ_SANITIZE)) &&
nr_sectors > queue_max_hw_sectors(q))) {
printk(KERN_ERR "bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
@@ -1689,6 +1689,14 @@ generic_make_request_checks(struct bio *bio)
goto end_io;
}
+ if ((bio->bi_rw & REQ_SANITIZE) &&
+ (!blk_queue_sanitize(q))) {
+		pr_info("%s - got a SANITIZE request but the queue "
+			"doesn't support sanitize requests\n", __func__);
+ err = -EOPNOTSUPP;
+ goto end_io;
+ }
+
if (blk_throtl_bio(q, bio))
return false; /* throttled, will be resubmitted later */
@@ -1794,7 +1802,8 @@ void submit_bio(int rw, struct bio *bio)
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
*/
- if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
+ if (bio_has_data(bio) &&
+ (!(rw & (REQ_DISCARD | REQ_SANITIZE)))) {
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
@@ -1840,7 +1849,7 @@ EXPORT_SYMBOL(submit_bio);
*/
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
- if (rq->cmd_flags & REQ_DISCARD)
+ if (rq->cmd_flags & (REQ_DISCARD | REQ_SANITIZE))
return 0;
	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2b461b4..280d63e 100644
@@ -115,6 +115,57 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
EXPORT_SYMBOL(blkdev_issue_discard);
/**
+ * blkdev_issue_sanitize - queue a sanitize request
+ * @bdev: blockdev to issue sanitize for
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ * Issue a sanitize request for the specified block device
+ */
+int blkdev_issue_sanitize(struct block_device *bdev, gfp_t gfp_mask)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request_queue *q = bdev_get_queue(bdev);
+ int type = REQ_WRITE | REQ_SANITIZE;
+ struct bio_batch bb;
+ struct bio *bio;
+ int ret = 0;
+
+ if (!q)
+ return -ENXIO;
+
+ if (!blk_queue_sanitize(q)) {
+		pr_err("%s - card doesn't support sanitize\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ bio = bio_alloc(gfp_mask, 1);
+ if (!bio)
+ return -ENOMEM;
+
+ atomic_set(&bb.done, 1);
+ bb.flags = 1 << BIO_UPTODATE;
+ bb.wait = &wait;
+
+ bio->bi_end_io = bio_batch_end_io;
+ bio->bi_bdev = bdev;
+ bio->bi_private = &bb;
+
+ atomic_inc(&bb.done);
+ submit_bio(type, bio);
+
+ /* Wait for bios in-flight */
+ if (!atomic_dec_and_test(&bb.done))
+ wait_for_completion(&wait);
+
+ if (!test_bit(BIO_UPTODATE, &bb.flags))
+ ret = -EIO;
+
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_sanitize);
+
+/**
 * blkdev_issue_zeroout - generate number of zero filled write bios
* @bdev: blockdev to issue
* @sector: start sector
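
[Note: a minimal caller sketch, not part of the patch. It assumes an
already-opened struct block_device *bdev; the error values mirror what
blkdev_issue_sanitize() above can return.]

	int ret = blkdev_issue_sanitize(bdev, GFP_KERNEL);

	if (ret == -EOPNOTSUPP)
		pr_info("sanitize not supported by this device\n");
	else if (ret)
		pr_err("sanitize failed: %d\n", ret);
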
diff --git a/block/blk-merge.c b/block/blk-merge.c
@@ -477,6 +477,10 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (!rq_mergeable(rq))
return false;
+	/* don't merge file system requests and sanitize requests */
+	if ((bio->bi_rw & REQ_SANITIZE) != (rq->bio->bi_rw & REQ_SANITIZE))
+		return false;
+
/* don't merge file system requests and discard requests */
if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
return false;
diff --git a/block/elevator.c b/block/elevator.c
@@ -563,7 +563,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
if (rq->cmd_flags & REQ_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
if (rq->cmd_type == REQ_TYPE_FS ||
- (rq->cmd_flags & REQ_DISCARD)) {
+ (rq->cmd_flags & (REQ_DISCARD | REQ_SANITIZE))) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
diff --git a/block/ioctl.c b/block/ioctl.c
@@ -132,6 +132,11 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
	return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
}
+static int blk_ioctl_sanitize(struct block_device *bdev)
+{
+	return blkdev_issue_sanitize(bdev, GFP_KERNEL);
+}
+
static int put_ushort(unsigned long arg, unsigned short val)
{
	return put_user(val, (unsigned short __user *)arg);
@@ -234,6 +239,10 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
set_device_ro(bdev, n);
return 0;
+ case BLKSANITIZE:
+ ret = blk_ioctl_sanitize(bdev);
+ break;
+
case BLKDISCARD:
case BLKSECDISCARD: {
uint64_t range[2];
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 0edb65d..e58e0db 100644
@@ -160,6 +160,7 @@ enum rq_flag_bits {
__REQ_FLUSH_SEQ, /* request for flush sequence */
__REQ_IO_STAT, /* account I/O stat */
	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
+ __REQ_SANITIZE, /* sanitize */
__REQ_NR_BITS, /* stops here */
};
@@ -171,13 +172,15 @@ enum rq_flag_bits {
#define REQ_META (1 << __REQ_META)
#define REQ_PRIO (1 << __REQ_PRIO)
#define REQ_DISCARD (1 << __REQ_DISCARD)
+#define REQ_SANITIZE (1 << __REQ_SANITIZE)
#define REQ_NOIDLE (1 << __REQ_NOIDLE)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
- REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
+ REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE | \
+ REQ_SANITIZE)
#define REQ_CLONE_MASK REQ_COMMON_MASK
#define REQ_RAHEAD (1 << __REQ_RAHEAD)
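
[Note: with REQ_SANITIZE in REQ_COMMON_MASK the flag survives request
cloning and reaches the low-level driver. A sketch of how a driver's
request handler could pick it up; mydrv_do_sanitize() is a hypothetical
driver-specific helper, not something this patch provides.]

	static void mydrv_handle_request(struct request *req)
	{
		if (req->cmd_flags & REQ_SANITIZE) {
			mydrv_do_sanitize(req);	/* issue the device's sanitize op */
			return;
		}
		/* normal read/write handling ... */
	}
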
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ba43f40..1db6c91 100644
@@ -438,6 +438,7 @@ struct request_queue {
#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
+#define QUEUE_FLAG_SANITIZE 19 /* supports SANITIZE */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -518,6 +519,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
+#define blk_queue_sanitize(q)	test_bit(QUEUE_FLAG_SANITIZE, &(q)->queue_flags)
#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
@@ -971,6 +973,7 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_sanitize(struct block_device *bdev, gfp_t gfp_mask);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
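
[Note: generic_make_request_checks() above rejects REQ_SANITIZE bios
unless QUEUE_FLAG_SANITIZE is set, so a driver that implements sanitize
must advertise the capability on its request queue at init time. A
minimal sketch, using the queue-flag helpers of this kernel era:]

	queue_flag_set_unlocked(QUEUE_FLAG_SANITIZE, q);
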
diff --git a/include/linux/fs.h b/include/linux/fs.h
@@ -333,6 +333,7 @@ struct inodes_stat_t {
#define BLKDISCARDZEROES _IO(0x12,124)
#define BLKSECDISCARD _IO(0x12,125)
#define BLKROTATIONAL _IO(0x12,126)
+#define BLKSANITIZE _IO(0x12, 127)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
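
[Note: user-space sketch of the new ioctl. The device path is
illustrative only, and the program assumes headers from a kernel
carrying this patch so that BLKSANITIZE is defined.]

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int main(void)
	{
		int fd = open("/dev/mmcblk0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, BLKSANITIZE) < 0)
			perror("BLKSANITIZE");
		close(fd);
		return 0;
	}
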
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index c0bd030..06f7940 100644
@@ -1788,6 +1788,8 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
rwbs[i++] = 'W';
else if (rw & REQ_DISCARD)
rwbs[i++] = 'D';
+ else if (rw & REQ_SANITIZE)
+ rwbs[i++] = 'Z';
else if (bytes)
rwbs[i++] = 'R';
else