@@ -2608,7 +2608,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
req->__data_len -= total_bytes;
/* update sector only for requests with clear definition of sector */
- if (!blk_rq_is_passthrough(req))
+ if (blk_rq_accesses_medium(req))
req->__sector += total_bytes >> 9;
/* mixed attributes always follow the first bio */
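The definition of blk_rq_accesses_medium() is not shown in this excerpt. A minimal sketch consistent with how the helper is used below, assuming it is meant to exclude both passthrough commands and zone management operations (the exact set of excluded operations is an assumption), could look like:

/*
 * Sketch only: a request "accesses the medium" if it is a normal
 * filesystem request (not a passthrough command) and not a zone
 * management operation such as a zone report or zone reset.
 */
static inline bool blk_rq_accesses_medium(struct request *rq)
{
	return !blk_rq_is_passthrough(rq) &&
	       req_op(rq) != REQ_OP_ZONE_REPORT &&
	       req_op(rq) != REQ_OP_ZONE_RESET;
}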
@@ -255,7 +255,7 @@ static inline int blk_do_io_stat(struct request *rq)
{
return rq->rq_disk &&
(rq->rq_flags & RQF_IO_STAT) &&
- !blk_rq_is_passthrough(rq);
+ blk_rq_accesses_medium(rq);
}
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
@@ -635,16 +635,20 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
rq->q = q;
- if (rq->rq_flags & RQF_SOFTBARRIER) {
+ if (!blk_rq_accesses_medium(rq)) {
+ /* Do not schedule zone requests */
+ where = ELEVATOR_INSERT_FRONT;
+ }
+ if (rq->rq_flags & RQF_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
- if (!blk_rq_is_passthrough(rq)) {
+ if (blk_rq_accesses_medium(rq)) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
(where == ELEVATOR_INSERT_SORT ||
- where == ELEVATOR_INSERT_SORT_MERGE))
+ where == ELEVATOR_INSERT_SORT_MERGE)) {
where = ELEVATOR_INSERT_BACK;
+ }
switch (where) {
case ELEVATOR_INSERT_REQUEUE:
@@ -679,7 +683,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
if (elv_attempt_insert_merge(q, rq))
break;
case ELEVATOR_INSERT_SORT:
- BUG_ON(blk_rq_is_passthrough(rq));
+ BUG_ON(!blk_rq_accesses_medium(rq));
rq->rq_flags |= RQF_SORTED;
q->nr_sorted++;
if (rq_mergeable(rq)) {
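Taken together, the elevator hunks force any request that does not access the medium to ELEVATOR_INSERT_FRONT, so it can never reach the ELEVATOR_INSERT_SORT case guarded by the BUG_ON above. As an illustration only (not part of the patch, and assuming the blk_rq_accesses_medium() sketch shown earlier), a zone reset issued on a request-based zoned disk through blkdev_reset_zones(), as available in kernels of this era, reaches the elevator as a REQ_OP_ZONE_RESET request and therefore takes the front-insert path without being sorted or merged:

/*
 * Hypothetical caller: resets the zone starting at sector 0. The
 * resulting REQ_OP_ZONE_RESET request does not "access the medium"
 * in the sense used by this patch, so the elevator queues it at the
 * head instead of sorting it.
 */
static int example_reset_first_zone(struct block_device *bdev,
				    sector_t zone_sectors)
{
	return blkdev_reset_zones(bdev, 0, zone_sectors, GFP_KERNEL);
}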
@@ -399,7 +399,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
blk_mq_sched_request_inserted(rq);
- if (at_head || blk_rq_is_passthrough(rq)) {
+ if (at_head || !blk_rq_accesses_medium(rq)) {
if (at_head)
list_add(&rq->queuelist, &dd->dispatch);
else
@@ -720,7 +720,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
static inline bool blk_account_rq(struct request *rq)
{
- return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
+ return (rq->rq_flags & RQF_STARTED) && blk_rq_accesses_medium(rq);
}
#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
@@ -796,7 +796,7 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
static inline bool rq_mergeable(struct request *rq)
{
- if (blk_rq_is_passthrough(rq))
+ if (!blk_rq_accesses_medium(rq))
return false;
if (req_op(rq) == REQ_OP_FLUSH)
@@ -1070,7 +1070,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
{
struct request_queue *q = rq->q;
- if (blk_rq_is_passthrough(rq))
+ if (!blk_rq_accesses_medium(rq))
return q->limits.max_hw_sectors;
if (!q->limits.chunk_sectors ||
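On a zoned queue, chunk_sectors is normally set to the zone size so that medium-accessing requests are split at zone boundaries; the hunk above keeps zone management and passthrough commands bounded only by max_hw_sectors instead. A small sketch of that driver-side setup (an assumption for illustration, not part of the patch):

/*
 * Sketch: a zoned driver typically advertises its zone size via
 * chunk_sectors, which blk_rq_get_max_sectors() applies only to
 * requests that access the medium.
 */
static void example_set_zone_limit(struct request_queue *q,
				   unsigned int zone_sectors)
{
	blk_queue_chunk_sectors(q, zone_sectors);
}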