
[V3] block: fix the DISCARD request merge

Message ID 1540350450-15208-1-git-send-email-jianchao.w.wang@oracle.com (mailing list archive)
State New, archived
Series: [V3] block: fix the DISCARD request merge

Commit Message

jianchao.wang Oct. 24, 2018, 3:07 a.m. UTC
There are two cases when handling a DISCARD merge.
If max_discard_segments == 1, the bios/requests need to be contiguous
to merge. If max_discard_segments > 1, every bio is taken as a range,
and different ranges need not be contiguous.

But attempt_merge currently gets both cases wrong: it still requires
contiguity for DISCARD when max_discard_segments > 1, and it cannot
merge contiguous DISCARDs when max_discard_segments == 1, because
req_attempt_discard_merge always returns false in that case.
This patch fixes both cases.

Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
---

V3:
 - Introduce blk_discard_mergable and use it in attempt_merge and
   blk_try_merge.
 - Some comment changes.

V2:
 - Add a max_discard_segments > 1 check in attempt_merge.
 - Change patch title and comment.
 - Add more comments in attempt_merge.

 block/blk-merge.c | 34 ++++++++++++++++++++++++----------
 1 file changed, 24 insertions(+), 10 deletions(-)
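
For context on the max_discard_segments > 1 case: a driver that advertises
multiple discard segments receives the merged request as a chain of bios and
turns each bio into one range descriptor for the hardware. A minimal sketch of
that driver-side walk, assuming <linux/blkdev.h> is available (the
discard_range struct and the fill_discard_ranges name are illustrative
stand-ins, not from this patch or any particular driver):

	struct discard_range {		/* illustrative descriptor */
		u64	start_sector;
		u32	nr_sectors;
	};

	static int fill_discard_ranges(struct request *req,
				       struct discard_range *ranges,
				       unsigned int max_ranges)
	{
		struct bio *bio;
		unsigned int n = 0;

		/* Each merged bio becomes one range; ranges may be discontiguous. */
		__rq_for_each_bio(bio, req) {
			if (n >= max_ranges)
				return -EINVAL;
			ranges[n].start_sector = bio->bi_iter.bi_sector;
			ranges[n].nr_sectors = bio_sectors(bio);
			n++;
		}
		return n;
	}

This is why contiguity must not be required in that case: the device consumes
discrete (sector, length) pairs rather than one merged span.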

Comments

Christoph Hellwig Oct. 24, 2018, 11:22 a.m. UTC | #1
> -	/*
> -	 * not contiguous
> -	 */
> -	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
> -		return NULL;
> -

As suggested previously, I think we want something like blk_try_merge
and its return values here.  Maybe a blk_try_req_merge that takes
requests instead of bios.
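
A sketch of what that helper might look like (hypothetical here --
blk_try_req_merge is only being proposed in this thread): mirror
blk_try_merge, but take two requests and fold the contiguity test into the
returned merge type, reusing blk_discard_mergable from the patch:

	static enum elv_merge blk_try_req_merge(struct request *req,
						struct request *next)
	{
		/* Multi-range DISCARD merges regardless of contiguity. */
		if (blk_discard_mergable(req))
			return ELEVATOR_DISCARD_MERGE;
		/* Everything else must be back-to-back contiguous. */
		else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
			return ELEVATOR_BACK_MERGE;

		return ELEVATOR_NO_MERGE;
	}

attempt_merge could then switch on the result instead of open-coding the
three-way if/else chain this patch introduces.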
jianchao.wang Oct. 26, 2018, 8:05 a.m. UTC | #2
Would anyone please take a look at this?

Thanks in advance.
Jianchao

Christoph Hellwig Oct. 26, 2018, 8:07 a.m. UTC | #3
On Fri, Oct 26, 2018 at 04:05:00PM +0800, jianchao.wang wrote:
> Would anyone please take a look at this?

I did take a look and reply to it.

jianchao.wang Oct. 26, 2018, 8:22 a.m. UTC | #4
On 10/26/18 4:07 PM, Christoph Hellwig wrote:
> On Fri, Oct 26, 2018 at 04:05:00PM +0800, jianchao.wang wrote:
>> Would anyone please take a look at this?
> 
> I did take a look and reply to it.

Oh sorry, I missed it in my email client.
Got it from https://lkml.org/lkml/2018/10/24/491
And we could also have a blk_merge_req_ok there. :)

Thanks
Jianchao
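
To make that blk_merge_req_ok aside concrete: such a helper (never part of
this series; the name and shape are only Jianchao's suggestion) could gather
the cheap request-vs-request rejection tests that attempt_merge currently
open-codes, mirroring what blk_rq_merge_ok does for the bio case:

	static bool blk_merge_req_ok(struct request *req, struct request *next)
	{
		/* Quick rejections taken from the top of attempt_merge(). */
		if (req_op(req) != req_op(next))
			return false;
		if (rq_data_dir(req) != rq_data_dir(next))
			return false;
		if (req->rq_disk != next->rq_disk)
			return false;

		return !req_no_special_merge(next);
	}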

Patch

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 42a4674..b258de0 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -714,6 +714,22 @@ static void blk_account_io_merge(struct request *req)
 		part_stat_unlock();
 	}
 }
+/*
+ * There are two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver treats every bio
+ * as a range and sends them to the controller together. The
+ * ranges need not be contiguous.
+ * Otherwise, the bios/requests are handled the same as the
+ * others, which must be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+	if (req_op(req) == REQ_OP_DISCARD &&
+	    queue_max_discard_segments(req->q) > 1)
+		return true;
+	else
+		return false;
+}
 
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
@@ -731,12 +747,6 @@ static struct request *attempt_merge(struct request_queue *q,
 	if (req_op(req) != req_op(next))
 		return NULL;
 
-	/*
-	 * not contiguous
-	 */
-	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
-		return NULL;
-
 	if (rq_data_dir(req) != rq_data_dir(next)
 	    || req->rq_disk != next->rq_disk
 	    || req_no_special_merge(next))
@@ -760,11 +770,16 @@ static struct request *attempt_merge(struct request_queue *q,
 	 * counts here. Handle DISCARDs separately, as they
 	 * have separate settings.
 	 */
-	if (req_op(req) == REQ_OP_DISCARD) {
+
+	if (blk_discard_mergable(req)) {
 		if (!req_attempt_discard_merge(q, req, next))
 			return NULL;
-	} else if (!ll_merge_requests_fn(q, req, next))
+	} else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next)) {
+		if (!ll_merge_requests_fn(q, req, next))
+			return NULL;
+	} else {
 		return NULL;
+	}
 
 	/*
 	 * If failfast settings disagree or any of the two is already
@@ -888,8 +903,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 {
-	if (req_op(rq) == REQ_OP_DISCARD &&
-	    queue_max_discard_segments(rq->q) > 1)
+	if (blk_discard_mergable(rq))
 		return ELEVATOR_DISCARD_MERGE;
 	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
 		return ELEVATOR_BACK_MERGE;