
[RFC,3/4] blk-mq: prepare for supporting runtime PM

Message ID 20180711162906.14271-4-ming.lei@redhat.com (mailing list archive)
State RFC, archived

Commit Message

Ming Lei July 11, 2018, 4:29 p.m. UTC
This patch introduces blk_mq_pm_add_request(), which is called after
allocating a request, and blk_mq_pm_put_request(), which is called
after a request is freed.

For blk-mq, it can be quite expensive to account for in-flight IOs,
so this patch simply calls pm_runtime_mark_last_busy() after each IO
is done, instead of doing that only after the last in-flight IO is done.
This still works, since blk_pre_runtime_suspend() checks for active
non-PM IO and prevents runtime suspend if there is any.

Also make blk_post_runtime_resume() cover blk-mq.

Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: linux-pm@vger.kernel.org
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Bart Van Assche <bart.vanassche@wdc.com>
Cc: "James E.J. Bottomley" <jejb@linux.vnet.ibm.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: linux-scsi@vger.kernel.org
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-core.c | 12 ++++++++++--
 block/blk-mq.c   | 24 ++++++++++++++++++++++++
 2 files changed, 34 insertions(+), 2 deletions(-)

Comments

Christoph Hellwig July 11, 2018, 5:19 p.m. UTC | #1
> index c4b57d8806fe..bf66d561980d 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c
> @@ -3804,12 +3804,17 @@ EXPORT_SYMBOL(blk_pm_runtime_init);
>  int blk_pre_runtime_suspend(struct request_queue *q)
>  {
>  	int ret = 0;
> +	bool active;
>  
>  	if (!q->dev)
>  		return ret;
>  
>  	spin_lock_irq(q->queue_lock);
> -	if (q->nr_pending) {
> +	if (!q->mq_ops)
> +		active = !!q->nr_pending;
> +	else
> +		active = !blk_mq_pm_queue_idle(q);
> +	if (active) {

We shouldn't really need queue_lock for blk-mq.  Also the !! is not
really needed when assigning to a bool.

> +static void blk_mq_pm_add_request(struct request_queue *q, struct request *rq)
> +{
> +	if (!blk_mq_support_runtime_pm(q))
> +		return;
> +
> +	if (q->dev && !(rq->rq_flags & RQF_PM) &&
> +	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
> +		pm_request_resume(q->dev);

blk_mq_support_runtime_pm already checks for q->dev.  Also, to me it
seems that just open-coding blk_mq_pm_add_request / blk_mq_pm_put_request
in the callers would be more obvious.

> @@ -1841,6 +1863,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
>  
>  	rq_qos_track(q, rq, bio);
>  
> +	blk_mq_pm_add_request(q, rq);

This doesn't seem to handle passthrough requests, and it doesn't actually
pair with blk_mq_free_request; is that intentional?
Ming Lei July 12, 2018, 9:58 a.m. UTC | #2
On Thu, Jul 12, 2018 at 12:29:05AM +0800, Ming Lei wrote:
> [...]
> @@ -3804,12 +3804,17 @@ EXPORT_SYMBOL(blk_pm_runtime_init);
>  int blk_pre_runtime_suspend(struct request_queue *q)
>  {
>  	int ret = 0;
> +	bool active;
>  
>  	if (!q->dev)
>  		return ret;
>  
>  	spin_lock_irq(q->queue_lock);
> -	if (q->nr_pending) {
> +	if (!q->mq_ops)
> +		active = !!q->nr_pending;
> +	else
> +		active = !blk_mq_pm_queue_idle(q);
> +	if (active) {
>  		ret = -EBUSY;
>  		pm_runtime_mark_last_busy(q->dev);
>  	} else {

Looks like there is one big issue: a new IO may come in just after reading
'active' and before writing RPM_SUSPENDING to q->rpm_status, and then both
the suspend and the new IO may be in progress at the same time.
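
Roughly, the interleaving is the following (an illustrative timeline of the
race described above, not code from the patch):

    CPU0: blk_pre_runtime_suspend()       CPU1: incoming IO
    -------------------------------       -------------------------------
    active = !blk_mq_pm_queue_idle(q);
    /* queue looks idle */
                                          allocate request; rpm_status is
                                          still RPM_ACTIVE, so no resume
                                          is requested and the IO is
                                          dispatched
    q->rpm_status = RPM_SUSPENDING;
    /* suspend proceeds while the new
       IO is still in flight */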


Thanks,
Ming
Ming Lei July 12, 2018, 12:28 p.m. UTC | #3
On Thu, Jul 12, 2018 at 05:58:28PM +0800, Ming Lei wrote:
> On Thu, Jul 12, 2018 at 12:29:05AM +0800, Ming Lei wrote:
> > [...]
> 
> Looks like there is one big issue: a new IO may come in just after reading
> 'active' and before writing RPM_SUSPENDING to q->rpm_status, and then both
> the suspend and the new IO may be in progress at the same time.

One idea I thought of is to use a seqlock to sync changing & reading
q->rpm_status; the read side (read_seqcount_begin/read_seqcount_retry)
shouldn't introduce a big cost in the fast path.
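
A minimal sketch of that idea (hypothetical: the seqcount_t field rpm_seq
and the reader helper below are assumptions, not part of this series; this
only shows the read/write pattern, and the suspend side would still have to
re-check for new IO after publishing RPM_SUSPENDING):

    /* writer side, e.g. in blk_pre_runtime_suspend() */
    spin_lock_irq(q->queue_lock);
    write_seqcount_begin(&q->rpm_seq);
    q->rpm_status = RPM_SUSPENDING;
    write_seqcount_end(&q->rpm_seq);
    spin_unlock_irq(q->queue_lock);

    /* reader side in the IO fast path */
    static bool blk_mq_rpm_need_resume(struct request_queue *q)
    {
            unsigned int seq;
            bool need;

            do {
                    seq = read_seqcount_begin(&q->rpm_seq);
                    need = q->rpm_status == RPM_SUSPENDED ||
                           q->rpm_status == RPM_SUSPENDING;
            } while (read_seqcount_retry(&q->rpm_seq, seq));

            return need;
    }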

Thanks,
Ming
Jens Axboe July 12, 2018, 2 p.m. UTC | #4
On 7/12/18 6:28 AM, Ming Lei wrote:
> On Thu, Jul 12, 2018 at 05:58:28PM +0800, Ming Lei wrote:
>> On Thu, Jul 12, 2018 at 12:29:05AM +0800, Ming Lei wrote:
>>> [...]
>>
>> Looks like there is one big issue: a new IO may come in just after reading
>> 'active' and before writing RPM_SUSPENDING to q->rpm_status, and then both
>> the suspend and the new IO may be in progress at the same time.
> 
> One idea I thought of is to use a seqlock to sync changing & reading
> q->rpm_status; the read side (read_seqcount_begin/read_seqcount_retry)
> shouldn't introduce a big cost in the fast path.

Let's please keep in mind that this is runtime pm stuff. Better to
make the rules relaxed around it, instead of adding synchronization.
Ming Lei July 12, 2018, 9:32 p.m. UTC | #5
On Thu, Jul 12, 2018 at 10:00 PM, Jens Axboe <axboe@kernel.dk> wrote:
> On 7/12/18 6:28 AM, Ming Lei wrote:
>> On Thu, Jul 12, 2018 at 05:58:28PM +0800, Ming Lei wrote:
>>> On Thu, Jul 12, 2018 at 12:29:05AM +0800, Ming Lei wrote:
>>>> [...]
>>>
>>> Looks like there is one big issue: a new IO may come in just after reading
>>> 'active' and before writing RPM_SUSPENDING to q->rpm_status, and then both
>>> the suspend and the new IO may be in progress at the same time.
>>
>> One idea I thought of is to use a seqlock to sync changing & reading
>> q->rpm_status; the read side (read_seqcount_begin/read_seqcount_retry)
>> shouldn't introduce a big cost in the fast path.
>
> Let's please keep in mind that this is runtime pm stuff. Better to
> make the rules relaxed around it, instead of adding synchronization.

But the race has to be avoided, otherwise IO may fail. I haven't
found any simple solution yet for avoiding the race without adding sync.

Any idea for avoiding the race without using sync like a seqlock or similar?


Thanks,
Ming Lei
Jens Axboe July 12, 2018, 9:44 p.m. UTC | #6
On 7/12/18 3:32 PM, Ming Lei wrote:
> On Thu, Jul 12, 2018 at 10:00 PM, Jens Axboe <axboe@kernel.dk> wrote:
>> On 7/12/18 6:28 AM, Ming Lei wrote:
>>> On Thu, Jul 12, 2018 at 05:58:28PM +0800, Ming Lei wrote:
>>>> On Thu, Jul 12, 2018 at 12:29:05AM +0800, Ming Lei wrote:
>>>>> [...]
>>>>
>>>> Looks like there is one big issue: a new IO may come in just after reading
>>>> 'active' and before writing RPM_SUSPENDING to q->rpm_status, and then both
>>>> the suspend and the new IO may be in progress at the same time.
>>>
>>> One idea I thought of is to use a seqlock to sync changing & reading
>>> q->rpm_status; the read side (read_seqcount_begin/read_seqcount_retry)
>>> shouldn't introduce a big cost in the fast path.
>>
>> Let's please keep in mind that this is runtime pm stuff. Better to
>> make the rules relaxed around it, instead of adding synchronization.
> 
> But the race has to be avoided, otherwise IO may fail. I haven't
> found any simple solution yet for avoiding the race without adding sync.
> 
> Any idea for avoiding the race without using sync like a seqlock or similar?

I just don't want anything like this in the hot path. Why can't we
handle this similarly to how we handle request timeouts? It'll
potentially delay the suspend by a few seconds, but surely that can't be
a big deal. I don't see why we need to track this on a per-request
basis.
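
One way to read that suggestion, as a rough sketch (the hook point and the
helper name are assumptions; pm_request_autosuspend() and
pm_runtime_mark_last_busy() are the existing PM-core interfaces):

    /* Hypothetical helper, called from the existing blk-mq timeout work
     * after its expired-request scan; 'idle' would be true when the scan
     * found no pending requests at all. */
    static void blk_mq_pm_maybe_suspend(struct request_queue *q, bool idle)
    {
            if (!q->dev)
                    return;

            if (idle)
                    pm_request_autosuspend(q->dev);
            else
                    pm_runtime_mark_last_busy(q->dev);
    }
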
Ming Lei July 12, 2018, 11:15 p.m. UTC | #7
On Thu, Jul 12, 2018 at 03:44:05PM -0600, Jens Axboe wrote:
> On 7/12/18 3:32 PM, Ming Lei wrote:
> > On Thu, Jul 12, 2018 at 10:00 PM, Jens Axboe <axboe@kernel.dk> wrote:
> >> On 7/12/18 6:28 AM, Ming Lei wrote:
> >>> On Thu, Jul 12, 2018 at 05:58:28PM +0800, Ming Lei wrote:
> >>>> On Thu, Jul 12, 2018 at 12:29:05AM +0800, Ming Lei wrote:
> >>>>> [...]
> >>>>
> >>>> Looks like there is one big issue: a new IO may come in just after reading
> >>>> 'active' and before writing RPM_SUSPENDING to q->rpm_status, and then both
> >>>> the suspend and the new IO may be in progress at the same time.
> >>>
> >>> One idea I thought of is to use a seqlock to sync changing & reading
> >>> q->rpm_status; the read side (read_seqcount_begin/read_seqcount_retry)
> >>> shouldn't introduce a big cost in the fast path.
> >>
> >> Let's please keep in mind that this is runtime pm stuff. Better to
> >> make the rules relaxed around it, instead of adding synchronization.
> > 
> > But the race has to be avoided, otherwise IO may fail. I haven't
> > found any simple solution yet for avoiding the race without adding sync.
> >
> > Any idea for avoiding the race without using sync like a seqlock or similar?
> 
> I just don't want anything like this in the hot path. Why can't we
> handle this similarly to how we handle request timeouts? It'll
> potentially delay the suspend by a few seconds, but surely that can't be
> a big deal. I don't see why we need to track this on a per-request
> basis.

For the legacy path, there is the queue lock, so the race mentioned above
doesn't exist.

I guess you are asking why we can't use an RCU style to deal with this
issue, so that we don't introduce cost in the fast path. But the problem
is that IO has to be submitted to active hardware; that is one invariant
of runtime PM.

So RCU/SRCU won't fix this issue because rcu_read_lock() synchronizes
nothing, and we have to make sure that the hardware is ready before
dispatching IO to the hardware/driver. That is why I think some sort of
sync is required in the IO path.
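
For illustration, the sync in the IO path could in principle be as light
as a memory barrier, if the suspend side publishes its state first and
then re-checks for IO (a hypothetical sketch; a matching barrier is
assumed on the submission side between marking the request in-flight and
reading rpm_status):

    /* hypothetical ordering in blk_pre_runtime_suspend() */
    q->rpm_status = RPM_SUSPENDING;     /* publish the state first */
    smp_mb();                           /* pair with the submission side */
    if (!blk_mq_pm_queue_idle(q)) {     /* then re-check for new IO */
            q->rpm_status = RPM_ACTIVE;
            ret = -EBUSY;
    }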

Thanks,
Ming
Jens Axboe July 13, 2018, 2:20 p.m. UTC | #8
On 7/12/18 5:15 PM, Ming Lei wrote:
> On Thu, Jul 12, 2018 at 03:44:05PM -0600, Jens Axboe wrote:
>> On 7/12/18 3:32 PM, Ming Lei wrote:
>>> On Thu, Jul 12, 2018 at 10:00 PM, Jens Axboe <axboe@kernel.dk> wrote:
>>>> On 7/12/18 6:28 AM, Ming Lei wrote:
>>>>> On Thu, Jul 12, 2018 at 05:58:28PM +0800, Ming Lei wrote:
>>>>>> On Thu, Jul 12, 2018 at 12:29:05AM +0800, Ming Lei wrote:
>>>>>>> [...]
>>>>>>
>>>>>> Looks like there is one big issue: a new IO may come in just after reading
>>>>>> 'active' and before writing RPM_SUSPENDING to q->rpm_status, and then both
>>>>>> the suspend and the new IO may be in progress at the same time.
>>>>>
>>>>> One idea I thought of is to use a seqlock to sync changing & reading
>>>>> q->rpm_status; the read side (read_seqcount_begin/read_seqcount_retry)
>>>>> shouldn't introduce a big cost in the fast path.
>>>>
>>>> Let's please keep in mind that this is runtime pm stuff. Better to
>>>> make the rules relaxed around it, instead of adding synchronization.
>>>
>>> But the race has to be avoided, otherwise IO may fail. I haven't
>>> found any simple solution yet for avoiding the race without adding sync.
>>>
>>> Any idea for avoiding the race without using sync like a seqlock or similar?
>>
>> I just don't want anything like this in the hot path. Why can't we
>> handle this similarly to how we handle request timeouts? It'll
>> potentially delay the suspend by a few seconds, but surely that can't be
>> a big deal. I don't see why we need to track this on a per-request
>> basis.
> 
> For the legacy path, there is the queue lock, so the race mentioned above
> doesn't exist.
> 
> I guess you are asking why we can't use an RCU style to deal with this
> issue, so that we don't introduce cost in the fast path. But the problem
> is that IO has to be submitted to active hardware; that is one invariant
> of runtime PM.
> 
> So RCU/SRCU won't fix this issue because rcu_read_lock() synchronizes
> nothing, and we have to make sure that the hardware is ready before
> dispatching IO to the hardware/driver. That is why I think some sort of
> sync is required in the IO path.

That's not what I meant at all. As I wrote above, I don't want it in the
hot path at all, and certainly not as a per-request thing. We already
have a timer on blk-mq that runs while requests are pending; by
definition, the last time that timer triggers, the device is idle. If you
need to do heavier lifting to ensure we only runtime suspend at that
point, then THAT'S the place to do it, not adding extra code per
request. I don't care how cheap the perceived locking is, it's still
extra code and checks for each and every request. That is what I am
objecting to.

Who even uses the runtime pm stuff?
Alan Stern July 13, 2018, 8:27 p.m. UTC | #9
On Fri, 13 Jul 2018, Jens Axboe wrote:

> >>> Any idea for avoiding the race without using sync like a seqlock or similar?
> >>
> >> I just don't want anything like this in the hot path. Why can't we
> >> handle this similarly to how we handle request timeouts? It'll
> >> potentially delay the suspend by a few seconds, but surely that can't be
> >> a big deal. I don't see why we need to track this on a per-request
> >> basis.
> > 
> > For legacy path, there is the queue lock, so no the race mentioned.
> > 
> > I guess you mean why we can't use RCU style to deal with this issue, so
> > we don't introduce cost in fast path, but the problem is that IO has
> > to be submitted to one active hardware, that is one invariant of runtime
> > PM.
> > 
> > So RCU/SRCU won't fix this issue because the rcu_read_lock sync nothing,
> > and we have to make sure that hardware is ready before dispatching IO to
> > hardware/driver. That is why I think sort of sync is required in IO path.
> 
> That's not what I meant at all. As I wrote above, I don't want it in the
> hot path at all, and certainly not as a per-request thing. We already
> have a timer on blk-mq that runs while requests are pending; by
> definition, the last time that timer triggers, the device is idle. If you
> need to do heavier lifting to ensure we only runtime suspend at that
> point, then THAT'S the place to do it, not adding extra code per
> request. I don't care how cheap the perceived locking is, it's still
> extra code and checks for each and every request. That is what I am
> objecting to.

The problem occurs on the opposite side: when a new request is added,
we don't want it to race with a just-started suspend transition.  Can
you suggest a way to prevent this without adding any overhead to the
hot path?

For that matter, we also have the issue of checking whether the device
is already suspended when a request is added; in that case we have to
resume the device before issuing the request.  I'm not aware of any way
to avoid performing this check in the hot path.

Is there already some synchronization in place for plugging or stopping 
a request queue?  If there is, could the runtime-PM code make use of 
it?  We might need to add a state in which a queue is blocked for 
normal requests but allows PM-related requests to run.

Alan Stern
Jens Axboe July 13, 2018, 8:39 p.m. UTC | #10
On 7/13/18 2:27 PM, Alan Stern wrote:
> On Fri, 13 Jul 2018, Jens Axboe wrote:
> 
>>>>> Any idea for avoiding the race without using sync like a seqlock or similar?
>>>>
>>>> I just don't want anything like this in the hot path. Why can't we
>>>> handle this similarly to how we handle request timeouts? It'll
>>>> potentially delay the suspend by a few seconds, but surely that can't be
>>>> a big deal. I don't see why we need to track this on a per-request
>>>> basis.
>>>
>>> For the legacy path, there is the queue lock, so the race mentioned above
>>> doesn't exist.
>>>
>>> I guess you are asking why we can't use an RCU style to deal with this
>>> issue, so that we don't introduce cost in the fast path. But the problem
>>> is that IO has to be submitted to active hardware; that is one invariant
>>> of runtime PM.
>>>
>>> So RCU/SRCU won't fix this issue because rcu_read_lock() synchronizes
>>> nothing, and we have to make sure that the hardware is ready before
>>> dispatching IO to the hardware/driver. That is why I think some sort of
>>> sync is required in the IO path.
>>
>> That's not what I meant at all. As I wrote above, I don't want it in the
>> hot path at all, and certainly not as a per-request thing. We already
>> have a timer on blk-mq that runs while requests are pending; by
>> definition, the last time that timer triggers, the device is idle. If you
>> need to do heavier lifting to ensure we only runtime suspend at that
>> point, then THAT'S the place to do it, not adding extra code per
>> request. I don't care how cheap the perceived locking is, it's still
>> extra code and checks for each and every request. That is what I am
>> objecting to.
> 
> The problem occurs on the opposite side: when a new request is added,
> we don't want it to race with a just-started suspend transition.  Can
> you suggest a way to prevent this without adding any overhead to the
> hot path?
> 
> For that matter, we also have the issue of checking whether the device
> is already suspended when a request is added; in that case we have to
> resume the device before issuing the request.  I'm not aware of any way
> to avoid performing this check in the hot path.

The issue is on both sides, of course. The problem, to me, appears to be
attempting to retrofit the old pre-suspend checking to blk-mq, where it
could be done a lot more optimally by having the suspend side be driven
by the timeout timer, and resume could be driven by the first request
entering an idle queue.

Doing a per-request inc/dec type of tracking with synchronization is the
easy/lazy approach, but it's also woefully inefficient. Any
sort of per-queue tracking for blk-mq is not a great idea.

> Is there already some synchronization in place for plugging or stopping 
> a request queue?  If there is, could the runtime-PM code make use of 
> it?  We might need to add a state in which a queue is blocked for 
> normal requests but allows PM-related requests to run.

Sure, blk-mq has a plethora of helpers for that, since we use it in
other places as well. And it might not be a bad idea to extend that to
cover this case as well. See blk_queue_enter() and the queue freeze and
quiescing. This is already per-request overhead we're paying.
Ming Lei July 13, 2018, 10:47 p.m. UTC | #11
On Fri, Jul 13, 2018 at 02:39:15PM -0600, Jens Axboe wrote:
> On 7/13/18 2:27 PM, Alan Stern wrote:
> > On Fri, 13 Jul 2018, Jens Axboe wrote:
> > 
> >>>>> Any idea for avoiding the race without using sync like a seqlock or similar?
> >>>>
> >>>> I just don't want anything like this in the hot path. Why can't we
> >>>> handle this similarly to how we handle request timeouts? It'll
> >>>> potentially delay the suspend by a few seconds, but surely that can't be
> >>>> a big deal. I don't see why we need to track this on a per-request
> >>>> basis.
> >>>
> >>> For the legacy path, there is the queue lock, so the race mentioned above
> >>> doesn't exist.
> >>>
> >>> I guess you are asking why we can't use an RCU style to deal with this
> >>> issue, so that we don't introduce cost in the fast path. But the problem
> >>> is that IO has to be submitted to active hardware; that is one invariant
> >>> of runtime PM.
> >>>
> >>> So RCU/SRCU won't fix this issue because rcu_read_lock() synchronizes
> >>> nothing, and we have to make sure that the hardware is ready before
> >>> dispatching IO to the hardware/driver. That is why I think some sort of
> >>> sync is required in the IO path.
> >>
> >> That's not what I meant at all. As I wrote above, I don't want it in the
> >> hot path at all, and certainly not as a per-request thing. We already
> >> have a timer on blk-mq that runs while requests are pending; by
> >> definition, the last time that timer triggers, the device is idle. If you
> >> need to do heavier lifting to ensure we only runtime suspend at that
> >> point, then THAT'S the place to do it, not adding extra code per
> >> request. I don't care how cheap the perceived locking is, it's still
> >> extra code and checks for each and every request. That is what I am
> >> objecting to.
> > 
> > The problem occurs on the opposite side: when a new request is added,
> > we don't want it to race with a just-started suspend transition.  Can
> > you suggest a way to prevent this without adding any overhead to the
> > hot path?
> > 
> > For that matter, we also have the issue of checking whether the device
> > is already suspended when a request is added; in that case we have to
> > resume the device before issuing the request.  I'm not aware of any way
> > to avoid performing this check in the hot path.
> 
> The issue is on both sides, of course. The problem, to me, appears to be
> attempting to retrofit the old pre-suspend checking to blk-mq, where it
> could be done a lot more optimally by having the suspend side be driven
> by the timeout timer, and

The timeout timer won't be an acceptable approach from the user's view,
since the default timeout is 30s while the autosuspend delay may be just
several seconds, but that isn't the current problem.

For now, though, the problem isn't on the suspend side; I did think about
this way. The issue is that we could enter suspend while the queue isn't
idle, e.g. when only RQF_PM requests are pending.

> resume could be driven by the first request entering an idle queue.

The 1st request may be an RQF_PM request.

> 
> Doing a per-request inc/dec type of tracking with synchronization is the
> easy/lazy approach, but it's also woefully inefficient. Any
> sort of per-queue tracking for blk-mq is not a great idea.
> 
> > Is there already some synchronization in place for plugging or stopping 
> > a request queue?  If there is, could the runtime-PM code make use of 
> > it?  We might need to add a state in which a queue is blocked for 
> > normal requests but allows PM-related requests to run.
> 
> Sure, blk-mq has a plethora of helpers for that, since we use it in
> other places as well. And it might not be a bad idea to extend that to
> cover this case as well. See blk_queue_enter() and the queue freeze and
> quiescing. This is already per-request overhead we're paying.

Before entering runtime suspend, there can't be any normal in-flight IO, so
quiesce won't work, since quiesce only prevents new dispatching and doesn't
drain the queue.

Freeze might work, but blk_queue_enter() will become a bit complicated:

1) the queue freeze can be done before runtime suspending

2) when any new IO comes, the queue has to be unfrozen.

3) when any RQF_PM request comes, the queue has to be kept frozen while
still allowing this request through.

I will think about this approach further and see if it can be done in
an easy way.
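
A rough sketch of steps 1)-3), shaped like the existing blk_queue_enter()
(hypothetical: the BLK_MQ_REQ_PM flag, the blk_queue_rpm_gated() test and
the blk_pm_request_resume() helper are assumptions, not existing
interfaces; a preempt-only-style gate is used instead of a real freeze,
since a killed q_usage_counter would block the RQF_PM request too):

    int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
    {
            /* hypothetical flag marking RQF_PM allocations */
            const bool pm = flags & BLK_MQ_REQ_PM;

            while (true) {
                    bool success = false;

                    rcu_read_lock();
                    if (percpu_ref_tryget_live(&q->q_usage_counter)) {
                            /* step 3: only PM requests may cross the gate */
                            if (pm || !blk_queue_rpm_gated(q))
                                    success = true;
                            else
                                    percpu_ref_put(&q->q_usage_counter);
                    }
                    rcu_read_unlock();

                    if (success)
                            return 0;

                    if (flags & BLK_MQ_REQ_NOWAIT)
                            return -EBUSY;

                    /* step 2: a normal request lifts the gate by resuming
                     * the device before retrying */
                    if (!pm)
                            blk_pm_request_resume(q);

                    wait_event(q->mq_freeze_wq,
                               (atomic_read(&q->mq_freeze_depth) == 0 &&
                                (pm || !blk_queue_rpm_gated(q))) ||
                               blk_queue_dying(q));
                    if (blk_queue_dying(q))
                            return -ENODEV;
            }
    }

This is roughly the shape of the existing BLK_MQ_REQ_PREEMPT/preempt-only
handling in blk_queue_enter(), reused here for runtime PM.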


Thanks,
Ming

Patch

diff --git a/block/blk-core.c b/block/blk-core.c
index c4b57d8806fe..bf66d561980d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3804,12 +3804,17 @@  EXPORT_SYMBOL(blk_pm_runtime_init);
 int blk_pre_runtime_suspend(struct request_queue *q)
 {
 	int ret = 0;
+	bool active;
 
 	if (!q->dev)
 		return ret;
 
 	spin_lock_irq(q->queue_lock);
-	if (q->nr_pending) {
+	if (!q->mq_ops)
+		active = !!q->nr_pending;
+	else
+		active = !blk_mq_pm_queue_idle(q);
+	if (active) {
 		ret = -EBUSY;
 		pm_runtime_mark_last_busy(q->dev);
 	} else {
@@ -3893,7 +3898,10 @@  void blk_post_runtime_resume(struct request_queue *q, int err)
 	spin_lock_irq(q->queue_lock);
 	if (!err) {
 		q->rpm_status = RPM_ACTIVE;
-		__blk_run_queue(q);
+		if (!q->mq_ops)
+			__blk_run_queue(q);
+		else
+			blk_mq_run_hw_queues(q, true);
 		pm_runtime_mark_last_busy(q->dev);
 		pm_request_autosuspend(q->dev);
 	} else {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3a78fed87959..50dd259f798f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -25,6 +25,7 @@ 
 #include <linux/delay.h>
 #include <linux/crash_dump.h>
 #include <linux/prefetch.h>
+#include <linux/pm_runtime.h>
 
 #include <trace/events/block.h>
 
@@ -80,6 +81,25 @@  bool blk_mq_pm_queue_idle(struct request_queue *q)
 	return idle_cnt == 0;
 }
 
+static void blk_mq_pm_add_request(struct request_queue *q, struct request *rq)
+{
+	if (!blk_mq_support_runtime_pm(q))
+		return;
+
+	if (q->dev && !(rq->rq_flags & RQF_PM) &&
+	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
+		pm_request_resume(q->dev);
+}
+
+static void blk_mq_pm_put_request(struct request_queue *q, struct request *rq)
+{
+	if (!blk_mq_support_runtime_pm(q))
+		return;
+
+	if (q->dev && !(rq->rq_flags & RQF_PM))
+		pm_runtime_mark_last_busy(q->dev);
+}
+
 /*
  * Check if any of the ctx's have pending work in this hardware queue
  */
@@ -531,6 +551,8 @@  void blk_mq_free_request(struct request *rq)
 	if (blk_rq_rl(rq))
 		blk_put_rl(blk_rq_rl(rq));
 
+	blk_mq_pm_put_request(q, rq);
+
 	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
 	if (refcount_dec_and_test(&rq->ref))
 		__blk_mq_free_request(rq);
@@ -1841,6 +1863,8 @@  static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	rq_qos_track(q, rq, bio);
 
+	blk_mq_pm_add_request(q, rq);
+
 	cookie = request_to_qc_t(data.hctx, rq);
 
 	plug = current->plug;