Message ID | cb7c4181478c6483fa3d5fe499f53cb9dc937f44.1464844522.git.osandov@fb.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On Thu, Jun 2, 2016 at 1:18 PM, Omar Sandoval <osandov@osandov.com> wrote: > From: Omar Sandoval <osandov@fb.com> > > Commit 0809e3ac6231 ("block: fix plug list flushing for nomerge queues") > updated blk_mq_make_request() to set request_count even when > blk_queue_nomerges() returns true. However, blk_mq_make_request() only > does limited plugging and doesn't use request_count; > blk_sq_make_request() is the one that should have been fixed. Do that > and get rid of the unnecessary work in the mq version. > > Fixes: 0809e3ac6231 ("block: fix plug list flushing for nomerge queues") > Signed-off-by: Omar Sandoval <osandov@fb.com> Looks good catch. Reviewed-by: Ming Lei <tom.leiming@gmail.com> > --- > Applies to v4.7-rc1. > > block/blk-mq.c | 17 ++++++++--------- > 1 file changed, 8 insertions(+), 9 deletions(-) > > diff --git a/block/blk-mq.c b/block/blk-mq.c > index 29cbc1b5fbdb..f9b9049b1284 100644 > --- a/block/blk-mq.c > +++ b/block/blk-mq.c > @@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) > > blk_queue_split(q, &bio, q->bio_split); > > - if (!is_flush_fua && !blk_queue_nomerges(q)) { > - if (blk_attempt_plug_merge(q, bio, &request_count, > - &same_queue_rq)) > - return BLK_QC_T_NONE; > - } else > - request_count = blk_plug_queued_count(q); > + if (!is_flush_fua && !blk_queue_nomerges(q) && > + blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq)) > + return BLK_QC_T_NONE; > > rq = blk_mq_map_request(q, bio, &data); > if (unlikely(!rq)) > @@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) > > blk_queue_split(q, &bio, q->bio_split); > > - if (!is_flush_fua && !blk_queue_nomerges(q) && > - blk_attempt_plug_merge(q, bio, &request_count, NULL)) > - return BLK_QC_T_NONE; > + if (!is_flush_fua && !blk_queue_nomerges(q)) { > + if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) > + return BLK_QC_T_NONE; > + } else > + request_count = blk_plug_queued_count(q); > > rq = blk_mq_map_request(q, bio, &data); > if (unlikely(!rq)) > -- > 2.8.3 > > -- > To unsubscribe from this list: send the line "unsubscribe linux-block" in > the body of a message to majordomo@vger.kernel.org > More majordomo info at http://vger.kernel.org/majordomo-info.html
Omar Sandoval <osandov@osandov.com> writes: > From: Omar Sandoval <osandov@fb.com> > > Commit 0809e3ac6231 ("block: fix plug list flushing for nomerge queues") > updated blk_mq_make_request() to set request_count even when > blk_queue_nomerges() returns true. However, blk_mq_make_request() only > does limited plugging and doesn't use request_count; > blk_sq_make_request() is the one that should have been fixed. Do that > and get rid of the unnecessary work in the mq version. > > Fixes: 0809e3ac6231 ("block: fix plug list flushing for nomerge queues") > Signed-off-by: Omar Sandoval <osandov@fb.com> How embarrassing! Thanks, Omar! Reviewed-by: Jeff Moyer <jmoyer@redhat.com> > --- > Applies to v4.7-rc1. > > block/blk-mq.c | 17 ++++++++--------- > 1 file changed, 8 insertions(+), 9 deletions(-) > > diff --git a/block/blk-mq.c b/block/blk-mq.c > index 29cbc1b5fbdb..f9b9049b1284 100644 > --- a/block/blk-mq.c > +++ b/block/blk-mq.c > @@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) > > blk_queue_split(q, &bio, q->bio_split); > > - if (!is_flush_fua && !blk_queue_nomerges(q)) { > - if (blk_attempt_plug_merge(q, bio, &request_count, > - &same_queue_rq)) > - return BLK_QC_T_NONE; > - } else > - request_count = blk_plug_queued_count(q); > + if (!is_flush_fua && !blk_queue_nomerges(q) && > + blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq)) > + return BLK_QC_T_NONE; > > rq = blk_mq_map_request(q, bio, &data); > if (unlikely(!rq)) > @@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) > > blk_queue_split(q, &bio, q->bio_split); > > - if (!is_flush_fua && !blk_queue_nomerges(q) && > - blk_attempt_plug_merge(q, bio, &request_count, NULL)) > - return BLK_QC_T_NONE; > + if (!is_flush_fua && !blk_queue_nomerges(q)) { > + if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) > + return BLK_QC_T_NONE; > + } else > + request_count = blk_plug_queued_count(q); > > rq = blk_mq_map_request(q, bio, &data); > if (unlikely(!rq)) -- To unsubscribe from this list: send the line "unsubscribe linux-block" in the body of a message to majordomo@vger.kernel.org > More majordomo info at http://vger.kernel.org/majordomo-info.html
On 06/01/2016 11:18 PM, Omar Sandoval wrote: > From: Omar Sandoval <osandov@fb.com> > > Commit 0809e3ac6231 ("block: fix plug list flushing for nomerge queues") > updated blk_mq_make_request() to set request_count even when > blk_queue_nomerges() returns true. However, blk_mq_make_request() only > does limited plugging and doesn't use request_count; > blk_sq_make_request() is the one that should have been fixed. Do that > and get rid of the unnecessary work in the mq version. Thanks Omar, applied for this series.
diff --git a/block/blk-mq.c b/block/blk-mq.c index 29cbc1b5fbdb..f9b9049b1284 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) blk_queue_split(q, &bio, q->bio_split); - if (!is_flush_fua && !blk_queue_nomerges(q)) { - if (blk_attempt_plug_merge(q, bio, &request_count, - &same_queue_rq)) - return BLK_QC_T_NONE; - } else - request_count = blk_plug_queued_count(q); + if (!is_flush_fua && !blk_queue_nomerges(q) && + blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq)) + return BLK_QC_T_NONE; rq = blk_mq_map_request(q, bio, &data); if (unlikely(!rq)) @@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) blk_queue_split(q, &bio, q->bio_split); - if (!is_flush_fua && !blk_queue_nomerges(q) && - blk_attempt_plug_merge(q, bio, &request_count, NULL)) - return BLK_QC_T_NONE; + if (!is_flush_fua && !blk_queue_nomerges(q)) { + if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) + return BLK_QC_T_NONE; + } else + request_count = blk_plug_queued_count(q); rq = blk_mq_map_request(q, bio, &data); if (unlikely(!rq))