@@ -338,7 +338,8 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
EXPORT_SYMBOL(blk_rq_init);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
- struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
+ struct blk_mq_tags *tags, unsigned int tag,
+ u64 alloc_time_ns, u64 start_time_ns)
{
struct blk_mq_ctx *ctx = data->ctx;
struct blk_mq_hw_ctx *hctx = data->hctx;
@@ -360,14 +361,11 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
}
rq->timeout = 0;

- if (blk_mq_need_time_stamp(rq))
- rq->start_time_ns = ktime_get_ns();
- else
- rq->start_time_ns = 0;
rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
rq->alloc_time_ns = alloc_time_ns;
#endif
+ rq->start_time_ns = start_time_ns;
rq->io_start_time_ns = 0;
rq->stats_sectors = 0;
rq->nr_phys_segments = 0;
@@ -405,11 +403,15 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
struct request *rq;
unsigned long tag_mask;
int i, nr = 0;
+ u64 start_time_ns = 0;

tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
if (unlikely(!tag_mask))
return NULL;

+ if (blk_mq_need_time_stamp(data->rq_flags))
+ start_time_ns = ktime_get_ns();
+
tags = blk_mq_tags_from_data(data);
for (i = 0; tag_mask; i++) {
if (!(tag_mask & (1UL << i)))
@@ -417,7 +419,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
tag = tag_offset + i;
prefetch(tags->static_rqs[tag]);
tag_mask &= ~(1UL << i);
- rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
+ rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns, start_time_ns);
rq_list_add(data->cached_rq, rq);
nr++;
}
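With the hunk above, the batch allocator samples ktime_get_ns() once and hands the same start_time_ns to every request it initializes, instead of reading the clock once per request in blk_mq_rq_ctx_init(). Below is a minimal userspace sketch of that pattern, not kernel code: NEED_TS, struct item, now_ns() and init_batch() are illustrative names.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NEED_TS 0x1u

struct item {
	uint64_t start_time_ns;
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* One clock read per batch, guarded by a cheap flags test; every
 * item in the batch gets the same stamp (0 if stamping is off). */
static void init_batch(struct item *items, int nr, unsigned int flags)
{
	uint64_t start_time_ns = 0;
	int i;

	if (flags & NEED_TS)
		start_time_ns = now_ns();

	for (i = 0; i < nr; i++)
		items[i].start_time_ns = start_time_ns;
}

int main(void)
{
	struct item batch[8];

	init_batch(batch, 8, NEED_TS);
	printf("first stamp: %llu ns\n",
	       (unsigned long long)batch[0].start_time_ns);
	return 0;
}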
@@ -431,7 +433,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
struct request_queue *q = data->q;
struct request_queue *q = data->q;
- u64 alloc_time_ns = 0;
+ u64 alloc_time_ns = 0, start_time_ns = 0;
struct request *rq;
unsigned int tag;

@@ -514,8 +516,11 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
goto retry;
}

+ if (blk_mq_need_time_stamp(data->rq_flags))
+ start_time_ns = ktime_get_ns();
+
return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
- alloc_time_ns);
+ alloc_time_ns, start_time_ns);
}

static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
@@ -1004,7 +1009,7 @@ static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
- if (blk_mq_need_time_stamp(rq))
+ if (blk_mq_need_time_stamp(rq->rq_flags))
__blk_mq_end_request_acct(rq, ktime_get_ns());

if (rq->end_io) {
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -834,9 +834,9 @@ void blk_mq_end_request_batch(struct io_comp_batch *ib);
* Only need start/end time stamping if we have iostat or
* blk stats enabled, or using an IO scheduler.
*/
-static inline bool blk_mq_need_time_stamp(struct request *rq)
+static inline bool blk_mq_need_time_stamp(req_flags_t rq_flags)
{
- return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
+ return (rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
}

static inline bool blk_mq_is_reserved_rq(struct request *rq)
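The signature change above is what enables the allocation-side checks earlier in the patch: blk_mq_need_time_stamp() becomes a pure predicate on the flags word, so callers can pass data->rq_flags before any struct request has been initialized, or rq->rq_flags after. A small userspace sketch of the same shape, with made-up bit values (the real RQF_* constants in the block headers differ):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int req_flags_t;

/* Illustrative bit values only; not the kernel's definitions. */
#define RQF_IO_STAT	(1u << 0)
#define RQF_STATS	(1u << 1)
#define RQF_USE_SCHED	(1u << 2)

/* Predicate on the flags word itself: usable both before a request
 * exists (alloc path) and after it completes (completion path). */
static inline bool need_time_stamp(req_flags_t rq_flags)
{
	return rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED);
}

int main(void)
{
	printf("%d\n", need_time_stamp(RQF_STATS));	/* prints 1 */
	printf("%d\n", need_time_stamp(0));		/* prints 0 */
	return 0;
}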
@@ -860,7 +860,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
iob->complete = complete;
else if (iob->complete != complete)
return false;
- iob->need_ts |= blk_mq_need_time_stamp(req);
+ iob->need_ts |= blk_mq_need_time_stamp(req->rq_flags);
rq_list_add(&iob->req_list, req);
return true;
}
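Finally, on the completion side the batch records whether any member needs stamping: the |= in blk_mq_add_to_batch() makes need_ts sticky, so a single end-of-I/O timestamp later covers the whole batch. A hedged userspace sketch of that accumulation (comp_batch, add_to_batch() and NEED_TS_MASK are illustrative names):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int req_flags_t;

/* Stand-in for RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED. */
#define NEED_TS_MASK 0x7u

struct comp_batch {
	bool need_ts;	/* sticky: set once any member needs a stamp */
	int nr;
};

static void add_to_batch(struct comp_batch *iob, req_flags_t rq_flags)
{
	/* |= keeps need_ts set even if later requests don't need it */
	iob->need_ts |= (rq_flags & NEED_TS_MASK) != 0;
	iob->nr++;
}

int main(void)
{
	struct comp_batch iob = { false, 0 };

	add_to_batch(&iob, 0);		/* no stamp needed */
	add_to_batch(&iob, 0x2u);	/* this one needs it */
	add_to_batch(&iob, 0);

	printf("batch of %d, need_ts=%d\n", iob.nr, iob.need_ts);
	return 0;
}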