--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1398,7 +1398,8 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
}
static struct request *blk_old_get_request(struct request_queue *q,
- unsigned int op, gfp_t gfp_mask)
+ unsigned int op, gfp_t gfp_mask,
+ unsigned int flags)
{
struct request *rq;
int ret = 0;
@@ -1408,8 +1409,7 @@ static struct request *blk_old_get_request(struct request_queue *q,
/* create ioc upfront */
create_io_context(gfp_mask, q->node);
- ret = blk_queue_enter(q, !(gfp_mask & __GFP_DIRECT_RECLAIM) ?
- BLK_REQ_NOWAIT : 0);
+ ret = blk_queue_enter(q, flags & BLK_REQ_BITS_MASK);
if (ret)
return ERR_PTR(ret);
spin_lock_irq(q->queue_lock);
@@ -1427,26 +1427,25 @@ static struct request *blk_old_get_request(struct request_queue *q,
return rq;
}
-struct request *blk_get_request(struct request_queue *q, unsigned int op,
- gfp_t gfp_mask)
+struct request *__blk_get_request(struct request_queue *q, unsigned int op,
+ gfp_t gfp_mask, unsigned int flags)
{
struct request *req;
+ flags |= (gfp_mask & __GFP_DIRECT_RECLAIM) ? 0 : BLK_REQ_NOWAIT;
if (q->mq_ops) {
- req = blk_mq_alloc_request(q, op,
- (gfp_mask & __GFP_DIRECT_RECLAIM) ?
- 0 : BLK_MQ_REQ_NOWAIT);
+ req = blk_mq_alloc_request(q, op, flags);
if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
q->mq_ops->initialize_rq_fn(req);
} else {
- req = blk_old_get_request(q, op, gfp_mask);
+ req = blk_old_get_request(q, op, gfp_mask, flags);
if (!IS_ERR(req) && q->initialize_rq_fn)
q->initialize_rq_fn(req);
}
return req;
}
-EXPORT_SYMBOL(blk_get_request);
+EXPORT_SYMBOL(__blk_get_request);
/**
* blk_requeue_request - put a request back on queue
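
For illustration only, not part of the patch: with the flags argument exposed, a caller can now request queue entry with BLK_REQ_PREEMPT explicitly. The helper name below is hypothetical, and the PREEMPT admission behavior in blk_queue_enter() is assumed from the flag's comment rather than shown in this hunk:

/*
 * Hypothetical caller: allocate a request with an explicit queue-enter
 * flag.  GFP_KERNEL includes __GFP_DIRECT_RECLAIM, so the translation in
 * __blk_get_request() leaves BLK_REQ_NOWAIT clear and the call may sleep.
 */
static struct request *example_alloc_preempt_rq(struct request_queue *q)
{
	return __blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL,
				 BLK_REQ_PREEMPT);
}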
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -384,8 +384,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
struct request *rq;
int ret;
- ret = blk_queue_enter(q, (flags & BLK_MQ_REQ_NOWAIT) ?
- BLK_REQ_NOWAIT : 0);
+ ret = blk_queue_enter(q, flags & BLK_REQ_BITS_MASK);
if (ret)
return ERR_PTR(ret);
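
The plain mask here, as in blk_old_get_request() above, only works because the low BLK_MQ_REQ_* bits now alias the BLK_REQ_* values (see the blk-mq.h hunk below). A sketch of compile-time checks that would pin that invariant down, assuming it sits in any function compiled against both headers:

static inline void example_check_flag_aliasing(void)
{
	BUILD_BUG_ON(BLK_MQ_REQ_NOWAIT != BLK_REQ_NOWAIT);
	BUILD_BUG_ON(BLK_MQ_REQ_PREEMPT != BLK_REQ_PREEMPT);
	/* blk-mq private flags must stay above the queue-enter mask */
	BUILD_BUG_ON(BLK_MQ_REQ_RESERVED & BLK_REQ_BITS_MASK);
	BUILD_BUG_ON(BLK_MQ_REQ_INTERNAL & BLK_REQ_BITS_MASK);
}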
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -197,9 +197,10 @@ void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
enum {
- BLK_MQ_REQ_NOWAIT = (1 << 0), /* return when out of requests */
- BLK_MQ_REQ_RESERVED = (1 << 1), /* allocate from reserved pool */
- BLK_MQ_REQ_INTERNAL = (1 << 2), /* allocate internal/sched tag */
+ BLK_MQ_REQ_NOWAIT = BLK_REQ_NOWAIT, /* return when out of requests */
+ BLK_MQ_REQ_PREEMPT = BLK_REQ_PREEMPT, /* allocate for RQF_PREEMPT */
+ BLK_MQ_REQ_RESERVED = (1 << BLK_REQ_MQ_START_BIT), /* allocate from reserved pool */
+ BLK_MQ_REQ_INTERNAL = (1 << (BLK_REQ_MQ_START_BIT + 1)), /* allocate internal/sched tag */
};
struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -860,7 +860,10 @@ enum {
/* passed to blk_queue_enter */
enum {
- BLK_REQ_NOWAIT = (1 << 0),
+ BLK_REQ_NOWAIT = (1 << 0),
+ BLK_REQ_PREEMPT = (1 << 1),
+ BLK_REQ_MQ_START_BIT = 2,
+ BLK_REQ_BITS_MASK = (1U << BLK_REQ_MQ_START_BIT) - 1,
};
extern unsigned long blk_max_low_pfn, blk_max_pfn;
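
For reference, the bit layout these definitions produce (derived from the hunks above, not part of the patch):

/*
 *   bit 0: BLK_REQ_NOWAIT      = 0x1  \ shared with blk-mq and passed
 *   bit 1: BLK_REQ_PREEMPT     = 0x2  / through to blk_queue_enter()
 *   bit 2: BLK_MQ_REQ_RESERVED = 0x4    blk-mq private
 *   bit 3: BLK_MQ_REQ_INTERNAL = 0x8    blk-mq private
 *
 *   BLK_REQ_BITS_MASK = (1U << 2) - 1 = 0x3
 */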
@@ -945,8 +948,9 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
-extern struct request *blk_get_request(struct request_queue *, unsigned int op,
- gfp_t gfp_mask);
+extern struct request *__blk_get_request(struct request_queue *,
+ unsigned int op, gfp_t gfp_mask,
+ unsigned int flags);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
@@ -997,6 +1001,13 @@ blk_status_t errno_to_blk_status(int errno);
bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
+static inline struct request *blk_get_request(struct request_queue *q,
+ unsigned int op,
+ gfp_t gfp_mask)
+{
+ return __blk_get_request(q, op, gfp_mask, 0);
+}
+
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
return bdev->bd_disk->queue; /* this is never NULL */
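
Existing callers are unaffected by the rename. An illustrative, unchanged blocking call site (not in the patch):

	struct request *rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);

This now expands to __blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL, 0); since GFP_KERNEL includes __GFP_DIRECT_RECLAIM, BLK_REQ_NOWAIT stays clear and the allocation may still sleep in blk_queue_enter().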