@@ -1623,7 +1623,7 @@ void submit_bio(int rw, struct bio *bio)
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
*/
- if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
+ if (bio_has_data(bio) && !(rw & (REQ_DISCARD | REQ_TUNE))) {
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
@@ -27,6 +27,53 @@ static void bio_batch_end_io(struct bio *bio, int err)
}
/**
+ * blkdev_issue_tune - queue a tune request
+ * @bdev: blockdev to issue the tune request for
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ * Issue a tune request on behalf of user space so the device can
+ * start background operations.
+ */
+int blkdev_issue_tune(struct block_device *bdev, gfp_t gfp_mask)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request_queue *q = bdev_get_queue(bdev);
+ struct bio_batch bb;
+ struct bio *bio;
+ int ret = 0;
+ int type = REQ_TUNE;
+
+ if (!blk_queue_tune(q))
+ return -ENXIO;
+
+ atomic_set(&bb.done, 1);
+ bb.flags = 1 << BIO_UPTODATE;
+ bb.wait = &wait;
+
+ bio = bio_alloc(gfp_mask, 1);
+ if (bio == NULL)
+ return -ENOMEM;
+
+ bio->bi_end_io = bio_batch_end_io;
+ bio->bi_bdev = bdev;
+ bio->bi_private = &bb;
+
+ atomic_inc(&bb.done);
+ submit_bio(type, bio);
+
+ /* Wait for bios in-flight */
+ if (!atomic_dec_and_test(&bb.done))
+ wait_for_completion(&wait);
+
+ if (!test_bit(BIO_UPTODATE, &bb.flags))
+ ret = -EIO;
+
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_tune);
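
For context, blkdev_issue_tune() follows the same submit-and-wait bio_batch pattern as blkdev_issue_flush() and blkdev_issue_discard(). A minimal sketch of an in-kernel caller is below; the function name and the device-lookup path are illustrative only, not part of this patch:

	/* Illustrative caller (not in this patch): open a block device by
	 * path and ask it to schedule background tuning. */
	static int example_issue_tune(const char *path)
	{
		struct block_device *bdev;
		int err;

		bdev = blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE, NULL);
		if (IS_ERR(bdev))
			return PTR_ERR(bdev);

		/* Returns -ENXIO if the queue never set QUEUE_FLAG_TUNE. */
		err = blkdev_issue_tune(bdev, GFP_KERNEL);

		blkdev_put(bdev, FMODE_READ | FMODE_WRITE);
		return err;
	}
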
+
+/**
* blkdev_issue_discard - queue a discard
* @bdev: blockdev to issue discard for
* @sector: start sector
@@ -112,6 +112,7 @@ static int blkdev_reread_part(struct block_device *bdev)
return res;
}
+
static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
uint64_t len, int secure)
{
@@ -214,6 +215,8 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return -EFAULT;
set_device_ro(bdev, n);
return 0;
+ case BLKTUNE:
+ return blkdev_issue_tune(bdev, GFP_KERNEL);
case BLKDISCARD:
case BLKSECDISCARD: {
@@ -791,6 +791,18 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
return 1;
}
+static int mmc_blk_issue_bkops(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+
+ spin_lock_irq(&md->lock);
+ __blk_end_request_all(req, 0);
+ spin_unlock_irq(&md->lock);
+
+ return 1;
+}
+
/*
* Reformat current write as a reliable write, supporting
* both legacy and the enhanced reliable write MMC cards.
@@ -1208,6 +1220,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
ret = mmc_blk_issue_flush(mq, req);
+ } else if (req && req->cmd_flags & REQ_TUNE) {
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
+ ret = mmc_blk_issue_bkops(mq, req);
+ mmc_card_set_need_bkops(card);
} else {
ret = mmc_blk_issue_rw_rq(mq, req);
}
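
Note that the REQ_TUNE branch only completes the block request and marks the card via mmc_card_set_need_bkops(); nothing in this hunk actually starts background operations. A hedged sketch of what such a follow-up could look like, assuming the eMMC 4.5 BKOPS_START EXT_CSD byte (the constant and helper names are illustrative and not defined by this patch):

	/* Illustrative only: ask the card to start background operations.
	 * EXT_CSD byte 164 is BKOPS_START in the eMMC 4.5 spec; the kernel
	 * constant is assumed here, not added by this patch. */
	#define EXT_CSD_BKOPS_START	164

	static int example_start_bkops(struct mmc_card *card)
	{
		if (!mmc_can_bkops(card))
			return -EOPNOTSUPP;

		/* Write 1 to BKOPS_START; a real implementation would pick a
		 * CMD6 timeout appropriate for the card instead of 500 ms. */
		return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				  EXT_CSD_BKOPS_START, 1, 500);
	}
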
@@ -133,6 +133,12 @@ struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
return sg;
}
+static void mmc_queue_setup_bkops(struct request_queue *q,
+ struct mmc_card *card)
+{
+ queue_flag_set_unlocked(QUEUE_FLAG_TUNE, q);
+}
+
static void mmc_queue_setup_discard(struct request_queue *q,
struct mmc_card *card)
{
@@ -191,6 +197,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (mmc_can_erase(card))
mmc_queue_setup_discard(mq->queue, card);
+ /* Mark the queue as supporting tune (BKOPS) requests */
+ if (mmc_can_bkops(card))
+ mmc_queue_setup_bkops(mq->queue, card);
+
#ifdef CONFIG_MMC_BLOCK_BOUNCE
if (host->max_segs == 1) {
unsigned int bouncesz;
@@ -1738,6 +1738,15 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
}
EXPORT_SYMBOL(mmc_erase);
+int mmc_can_bkops(struct mmc_card *card)
+{
+ if (mmc_card_mmc(card) && card->ext_csd.bkops &&
+ card->ext_csd.hpi)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_bkops);
+
int mmc_can_erase(struct mmc_card *card)
{
if ((card->host->caps & MMC_CAP_ERASE) &&
@@ -125,6 +125,7 @@ enum rq_flag_bits {
__REQ_SYNC, /* request is sync (sync write or read) */
__REQ_META, /* metadata io request */
__REQ_DISCARD, /* request to discard sectors */
+ __REQ_TUNE, /* tune request */
__REQ_NOIDLE, /* don't anticipate more IO after this one */
/* bio only flags */
@@ -161,13 +162,14 @@ enum rq_flag_bits {
#define REQ_SYNC (1 << __REQ_SYNC)
#define REQ_META (1 << __REQ_META)
#define REQ_DISCARD (1 << __REQ_DISCARD)
+#define REQ_TUNE (1 << __REQ_TUNE)
#define REQ_NOIDLE (1 << __REQ_NOIDLE)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
- REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
+ REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE | REQ_TUNE)
#define REQ_CLONE_MASK REQ_COMMON_MASK
#define REQ_RAHEAD (1 << __REQ_RAHEAD)
@@ -403,6 +403,7 @@ struct request_queue {
#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
+#define QUEUE_FLAG_TUNE 19 /* supports tune requests */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -487,6 +488,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+#define blk_queue_tune(q) test_bit(QUEUE_FLAG_TUNE, &(q)->queue_flags)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
@@ -929,6 +931,7 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
#define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
+extern int blkdev_issue_tune(struct block_device *bdev, gfp_t gfp_mask);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
@@ -320,6 +320,7 @@ struct inodes_stat_t {
#define BLKPBSZGET _IO(0x12,123)
#define BLKDISCARDZEROES _IO(0x12,124)
#define BLKSECDISCARD _IO(0x12,125)
+#define BLKTUNE _IO(0x12,126)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
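
From user space, the new BLKTUNE ioctl takes no argument; it asks the block device to schedule background operations and fails with ENXIO on queues that never set QUEUE_FLAG_TUNE. A minimal sketch, assuming a patched linux/fs.h that exports BLKTUNE and an eMMC device at /dev/mmcblk0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int main(void)
	{
		int fd = open("/dev/mmcblk0", O_RDWR);	/* example device node */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, BLKTUNE) < 0)
			perror("BLKTUNE");
		close(fd);
		return 0;
	}
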
@@ -156,6 +156,7 @@ extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
extern int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
unsigned int arg);
extern int mmc_can_erase(struct mmc_card *card);
+extern int mmc_can_bkops(struct mmc_card *card);
extern int mmc_can_trim(struct mmc_card *card);
extern int mmc_can_secure_erase_trim(struct mmc_card *card);
extern int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,